Columns: file_name (string, lengths 3-137), prefix (string, lengths 0-918k), suffix (string, lengths 0-962k), middle (string, lengths 0-812k)
pad_action_entry.rs
// Take a look at the license at the top of the repository in the LICENSE file.

use crate::PadActionType;

#[derive(Debug, Clone)]
pub struct PadActionEntry {
    type_: PadActionType,
    index: i32,
    mode: i32,
    label: String,
    action_name: String,
}

impl PadActionEntry {
    pub fn new(
        type_: PadActionType,
        index: i32,
        mode: i32,
        label: &str,
        action_name: &str,
    ) -> PadActionEntry
    #[doc(alias = "get_type")]
    pub fn type_(&self) -> PadActionType {
        self.type_
    }

    #[doc(alias = "get_index")]
    pub fn index(&self) -> i32 {
        self.index
    }

    #[doc(alias = "get_mode")]
    pub fn mode(&self) -> i32 {
        self.mode
    }

    #[doc(alias = "get_label")]
    pub fn label(&self) -> &str {
        &self.label
    }

    #[doc(alias = "get_action_name")]
    pub fn action_name(&self) -> &str {
        &self.action_name
    }
}
    {
        assert_initialized_main_thread!();
        PadActionEntry {
            type_,
            index,
            mode,
            label: label.to_owned(),
            action_name: action_name.to_owned(),
        }
    }
cellular_banner_test.js
// Copyright 2021 The Chromium Authors. All rights reserved.
// clang-format off
// #import 'chrome://os-settings/strings.m.js'
// #import 'chrome://os-settings/chromeos/os_settings.js';
// #import {assertTrue} from '../../chai_assert.js';
// #import {flush, Polymer} from 'chrome://resources/polymer/v3_0/polymer/polymer_bundled.min.js';
// clang-format on

suite('CellularBanner', function() {
  let cellularBanner;

  setup(function() {
    cellularBanner = document.createElement('cellular-banner');
    cellularBanner.deviceState = {
      type: chromeos.networkConfig.mojom.NetworkType.kCellular,
      deviceState: chromeos.networkConfig.mojom.DeviceStateType.kEnabled,
      inhibitReason:
          chromeos.networkConfig.mojom.InhibitReason.kInstallingProfile
    };
    assertTrue(!!cellularBanner);
    document.body.appendChild(cellularBanner);
    Polymer.dom.flush();
  });

  test('Base test', function() {
    const message = cellularBanner.i18n('cellularNetworkInstallingProfile');
    const bannerMessage = cellularBanner.$.bannerMessage;

    assertTrue(!!bannerMessage);
    assertEquals(bannerMessage.textContent.trim(), message);
  });
});
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
factory.go
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package externalversions

import (
    reflect "reflect"
    sync "sync"
    time "time"

    versioned "github.com/rook/cassandra/pkg/client/clientset/versioned"
    cassandrarookio "github.com/rook/cassandra/pkg/client/informers/externalversions/cassandra.rook.io"
    internalinterfaces "github.com/rook/cassandra/pkg/client/informers/externalversions/internalinterfaces"
    rookio "github.com/rook/cassandra/pkg/client/informers/externalversions/rook.io"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
    cache "k8s.io/client-go/tools/cache"
)

// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory

type sharedInformerFactory struct {
    client           versioned.Interface
    namespace        string
    tweakListOptions internalinterfaces.TweakListOptionsFunc
    lock             sync.Mutex
    defaultResync    time.Duration
    customResync     map[reflect.Type]time.Duration

    informers map[reflect.Type]cache.SharedIndexInformer
    // startedInformers is used for tracking which informers have been started.
    // This allows Start() to be called multiple times safely.
    startedInformers map[reflect.Type]bool
}

// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
    return func(factory *sharedInformerFactory) *sharedInformerFactory {
        for k, v := range resyncConfig {
            factory.customResync[reflect.TypeOf(k)] = v
        }
        return factory
    }
}

// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
    return func(factory *sharedInformerFactory) *sharedInformerFactory {
        factory.tweakListOptions = tweakListOptions
        return factory
    }
}

// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
    return func(factory *sharedInformerFactory) *sharedInformerFactory {
        factory.namespace = namespace
        return factory
    }
}

// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
    return NewSharedInformerFactoryWithOptions(client, defaultResync)
}

// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
    return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}

// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
    factory := &sharedInformerFactory{
        client:           client,
        namespace:        v1.NamespaceAll,
        defaultResync:    defaultResync,
        informers:        make(map[reflect.Type]cache.SharedIndexInformer),
        startedInformers: make(map[reflect.Type]bool),
        customResync:     make(map[reflect.Type]time.Duration),
    }

    // Apply all options
    for _, opt := range options {
        factory = opt(factory)
    }

    return factory
}

// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
    f.lock.Lock()
    defer f.lock.Unlock()

    for informerType, informer := range f.informers {
        if !f.startedInformers[informerType] {
            go informer.Run(stopCh)
            f.startedInformers[informerType] = true
        }
    }
}

// WaitForCacheSync waits until the caches of all started informers are synced.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
    informers := func() map[reflect.Type]cache.SharedIndexInformer {
        f.lock.Lock()
        defer f.lock.Unlock()

        informers := map[reflect.Type]cache.SharedIndexInformer{}
        for informerType, informer := range f.informers {
            if f.startedInformers[informerType] {
                informers[informerType] = informer
            }
        }
        return informers
    }()

    res := map[reflect.Type]bool{}
    for informType, informer := range informers {
        res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
    }
    return res
}

// InternalInformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
    f.lock.Lock()
    defer f.lock.Unlock()

    informerType := reflect.TypeOf(obj)
    informer, exists := f.informers[informerType]
    if exists
    resyncPeriod, exists := f.customResync[informerType]
    if !exists {
        resyncPeriod = f.defaultResync
    }

    informer = newFunc(f.client, resyncPeriod)
    f.informers[informerType] = informer

    return informer
}

// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
    internalinterfaces.SharedInformerFactory
    ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
    WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool

    Cassandra() cassandrarookio.Interface
    Rook() rookio.Interface
}

func (f *sharedInformerFactory) Cassandra() cassandrarookio.Interface {
    return cassandrarookio.New(f, f.namespace, f.tweakListOptions)
}

func (f *sharedInformerFactory) Rook() rookio.Interface {
    return rookio.New(f, f.namespace, f.tweakListOptions)
}
{ return informer }
carstate.py
import numpy as np
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.car.interfaces import CarStateBase
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.car.volkswagen.values import DBC_FILES, CANBUS, NetworkLocation, TransmissionType, GearShifter, BUTTON_STATES, CarControllerParams


class CarState(CarStateBase):
  def __init__(self, CP):
    super().__init__(CP)
    can_define = CANDefine(DBC_FILES.mqb)
    if CP.transmissionType == TransmissionType.automatic:
      self.shifter_values = can_define.dv["Getriebe_11"]["GE_Fahrstufe"]
    elif CP.transmissionType == TransmissionType.direct:
      self.shifter_values = can_define.dv["EV_Gearshift"]["GearPosition"]
    self.hca_status_values = can_define.dv["LH_EPS_03"]["EPS_HCA_Status"]
    self.buttonStates = BUTTON_STATES.copy()

  def update(self, pt_cp, cam_cp, ext_cp, trans_type):
    ret = car.CarState.new_message()

    # Update vehicle speed and acceleration from ABS wheel speeds.
    ret.wheelSpeeds.fl = pt_cp.vl["ESP_19"]["ESP_VL_Radgeschw_02"] * CV.KPH_TO_MS
    ret.wheelSpeeds.fr = pt_cp.vl["ESP_19"]["ESP_VR_Radgeschw_02"] * CV.KPH_TO_MS
    ret.wheelSpeeds.rl = pt_cp.vl["ESP_19"]["ESP_HL_Radgeschw_02"] * CV.KPH_TO_MS
    ret.wheelSpeeds.rr = pt_cp.vl["ESP_19"]["ESP_HR_Radgeschw_02"] * CV.KPH_TO_MS
    ret.vEgoRaw = float(np.mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr]))
    ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
    ret.standstill = ret.vEgoRaw < 0.1

    # Update steering angle, rate, yaw rate, and driver input torque. VW sends
    # the sign/direction in a separate signal, so they must be recombined.
    ret.steeringAngleDeg = pt_cp.vl["LH_EPS_03"]["EPS_Berechneter_LW"] * (1, -1)[int(pt_cp.vl["LH_EPS_03"]["EPS_VZ_BLW"])]
    ret.steeringRateDeg = pt_cp.vl["LWI_01"]["LWI_Lenkradw_Geschw"] * (1, -1)[int(pt_cp.vl["LWI_01"]["LWI_VZ_Lenkradw_Geschw"])]
    ret.steeringTorque = pt_cp.vl["LH_EPS_03"]["EPS_Lenkmoment"] * (1, -1)[int(pt_cp.vl["LH_EPS_03"]["EPS_VZ_Lenkmoment"])]
    ret.steeringPressed = abs(ret.steeringTorque) > CarControllerParams.STEER_DRIVER_ALLOWANCE
    ret.yawRate = pt_cp.vl["ESP_02"]["ESP_Gierrate"] * (1, -1)[int(pt_cp.vl["ESP_02"]["ESP_VZ_Gierrate"])] * CV.DEG_TO_RAD

    # Verify EPS readiness to accept steering commands.
    hca_status = self.hca_status_values.get(pt_cp.vl["LH_EPS_03"]["EPS_HCA_Status"])
    ret.steerError = hca_status in ["DISABLED", "FAULT"]
    ret.steerWarning = hca_status in ["INITIALIZING", "REJECTED"]

    # Update gas, brakes, and gearshift.
    ret.gas = pt_cp.vl["Motor_20"]["MO_Fahrpedalrohwert_01"] / 100.0
    ret.gasPressed = ret.gas > 0
    ret.brake = pt_cp.vl["ESP_05"]["ESP_Bremsdruck"] / 250.0  # FIXME: this is pressure in Bar, not sure what OP expects
    ret.brakePressed = bool(pt_cp.vl["ESP_05"]["ESP_Fahrer_bremst"])

    # Update gear and/or clutch position data.
    if trans_type == TransmissionType.automatic:
      ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl["Getriebe_11"]["GE_Fahrstufe"], None))
    elif trans_type == TransmissionType.direct:
      ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl["EV_Gearshift"]["GearPosition"], None))
    elif trans_type == TransmissionType.manual:
      ret.clutchPressed = not pt_cp.vl["Motor_14"]["MO_Kuppl_schalter"]
      if bool(pt_cp.vl["Gateway_72"]["BCM1_Rueckfahrlicht_Schalter"]):
        ret.gearShifter = GearShifter.reverse
      else:
        ret.gearShifter = GearShifter.drive

    # Update door and trunk/hatch lid open status.
    ret.doorOpen = any([pt_cp.vl["Gateway_72"]["ZV_FT_offen"],
                        pt_cp.vl["Gateway_72"]["ZV_BT_offen"],
                        pt_cp.vl["Gateway_72"]["ZV_HFS_offen"],
                        pt_cp.vl["Gateway_72"]["ZV_HBFS_offen"],
                        pt_cp.vl["Gateway_72"]["ZV_HD_offen"]])

    # Update seatbelt fastened status.
    ret.seatbeltUnlatched = pt_cp.vl["Airbag_02"]["AB_Gurtschloss_FA"] != 3

    # Update driver preference for metric. VW stores many different unit
    # preferences, including separate units for distance vs. speed.
    # We use the speed preference for OP.
    self.displayMetricUnits = not pt_cp.vl["Einheiten_01"]["KBI_MFA_v_Einheit_02"]

    # Consume blind-spot monitoring info/warning LED states, if available.
    # Infostufe: BSM LED on, Warnung: BSM LED flashing
    if self.CP.enableBsm:
      ret.leftBlindspot = bool(ext_cp.vl["SWA_01"]["SWA_Infostufe_SWA_li"]) or bool(ext_cp.vl["SWA_01"]["SWA_Warnung_SWA_li"])
      ret.rightBlindspot = bool(ext_cp.vl["SWA_01"]["SWA_Infostufe_SWA_re"]) or bool(ext_cp.vl["SWA_01"]["SWA_Warnung_SWA_re"])

    # Consume factory LDW data relevant for factory SWA (Lane Change Assist)
    # and capture it for forwarding to the blind spot radar controller.
    self.ldw_lane_warning_left = bool(cam_cp.vl["LDW_02"]["LDW_SW_Warnung_links"])
    self.ldw_lane_warning_right = bool(cam_cp.vl["LDW_02"]["LDW_SW_Warnung_rechts"])
    self.ldw_side_dlc_tlc = bool(cam_cp.vl["LDW_02"]["LDW_Seite_DLCTLC"])
    self.ldw_dlc = cam_cp.vl["LDW_02"]["LDW_DLC"]
    self.ldw_tlc = cam_cp.vl["LDW_02"]["LDW_TLC"]

    # Stock FCW is considered active if the release bit for brake-jerk warning
    # is set. Stock AEB is considered active if the partial braking or target
    # braking release bits are set.
    # Refer to VW Self Study Program 890253: Volkswagen Driver Assistance
    # Systems, chapter on Front Assist with Braking: Golf Family for all MQB.
    ret.stockFcw = bool(ext_cp.vl["ACC_10"]["AWV2_Freigabe"])
    ret.stockAeb = bool(ext_cp.vl["ACC_10"]["ANB_Teilbremsung_Freigabe"]) or bool(ext_cp.vl["ACC_10"]["ANB_Zielbremsung_Freigabe"])

    # Update ACC radar status.
    accStatus = pt_cp.vl["TSK_06"]["TSK_Status"]
    if accStatus == 2:
      # ACC okay and enabled, but not currently engaged
      ret.cruiseState.available = True
      ret.cruiseState.enabled = False
    elif accStatus in [3, 4, 5]:
      # ACC okay and enabled, currently engaged and regulating speed (3) or
      # engaged with driver accelerating (4) or overrun (5)
      ret.cruiseState.available = True
      ret.cruiseState.enabled = True
    else:
      # ACC okay but disabled (1), or a radar visibility or other fault/disruption (6 or 7)
      ret.cruiseState.available = False
      ret.cruiseState.enabled = False

    # Update ACC setpoint. When the setpoint is zero or there's an error, the
    # radar sends a set-speed of ~90.69 m/s / 203 mph.
    ret.cruiseState.speed = ext_cp.vl["ACC_02"]["ACC_Wunschgeschw"] * CV.KPH_TO_MS
    if ret.cruiseState.speed > 90:
      ret.cruiseState.speed = 0

    # Update control button states for turn signals and ACC controls.
    self.buttonStates["accelCruise"] = bool(pt_cp.vl["GRA_ACC_01"]["GRA_Tip_Hoch"])
    self.buttonStates["decelCruise"] = bool(pt_cp.vl["GRA_ACC_01"]["GRA_Tip_Runter"])
    self.buttonStates["cancel"] = bool(pt_cp.vl["GRA_ACC_01"]["GRA_Abbrechen"])
    self.buttonStates["setCruise"] = bool(pt_cp.vl["GRA_ACC_01"]["GRA_Tip_Setzen"])
    self.buttonStates["resumeCruise"] = bool(pt_cp.vl["GRA_ACC_01"]["GRA_Tip_Wiederaufnahme"])
    self.buttonStates["gapAdjustCruise"] = bool(pt_cp.vl["GRA_ACC_01"]["GRA_Verstellung_Zeitluecke"])
    ret.leftBlinker = bool(pt_cp.vl["Blinkmodi_02"]["Comfort_Signal_Left"])
    ret.rightBlinker = bool(pt_cp.vl["Blinkmodi_02"]["Comfort_Signal_Right"])

    # Read ACC hardware button type configuration info that has to pass through
    # to the radar. Ends up being different for steering wheel buttons vs
    # third-stalk type controls.
    self.graHauptschalter = pt_cp.vl["GRA_ACC_01"]["GRA_Hauptschalter"]
    self.graTypHauptschalter = pt_cp.vl["GRA_ACC_01"]["GRA_Typ_Hauptschalter"]
    self.graButtonTypeInfo = pt_cp.vl["GRA_ACC_01"]["GRA_ButtonTypeInfo"]
    self.graTipStufe2 = pt_cp.vl["GRA_ACC_01"]["GRA_Tip_Stufe_2"]

    # Pick up the GRA_ACC_01 CAN message counter so we can sync to it for
    # later cruise-control button spamming.
    self.graMsgBusCounter = pt_cp.vl["GRA_ACC_01"]["COUNTER"]

    # Additional safety checks performed in CarInterface.
    self.parkingBrakeSet = bool(pt_cp.vl["Kombi_01"]["KBI_Handbremse"])  # FIXME: need to include an EPB check as well
    ret.espDisabled = pt_cp.vl["ESP_21"]["ESP_Tastung_passiv"] != 0

    return ret

  @staticmethod
  def get_can_parser(CP):
    # This function generates lists for signals, messages, and initial values.
    signals = [
      # sig_name, sig_address, default
      ("EPS_Berechneter_LW", "LH_EPS_03", 0),       # Absolute steering angle
      ("EPS_VZ_BLW", "LH_EPS_03", 0),               # Steering angle sign
      ("LWI_Lenkradw_Geschw", "LWI_01", 0),         # Absolute steering rate
      ("LWI_VZ_Lenkradw_Geschw", "LWI_01", 0),      # Steering rate sign
      ("ESP_VL_Radgeschw_02", "ESP_19", 0),         # ABS wheel speed, front left
      ("ESP_VR_Radgeschw_02", "ESP_19", 0),         # ABS wheel speed, front right
      ("ESP_HL_Radgeschw_02", "ESP_19", 0),         # ABS wheel speed, rear left
      ("ESP_HR_Radgeschw_02", "ESP_19", 0),         # ABS wheel speed, rear right
      ("ESP_Gierrate", "ESP_02", 0),                # Absolute yaw rate
      ("ESP_VZ_Gierrate", "ESP_02", 0),             # Yaw rate sign
      ("ZV_FT_offen", "Gateway_72", 0),             # Door open, driver
      ("ZV_BT_offen", "Gateway_72", 0),             # Door open, passenger
      ("ZV_HFS_offen", "Gateway_72", 0),            # Door open, rear left
      ("ZV_HBFS_offen", "Gateway_72", 0),           # Door open, rear right
      ("ZV_HD_offen", "Gateway_72", 0),             # Trunk or hatch open
      ("Comfort_Signal_Left", "Blinkmodi_02", 0),   # Left turn signal including comfort blink interval
      ("Comfort_Signal_Right", "Blinkmodi_02", 0),  # Right turn signal including comfort blink interval
      ("AB_Gurtschloss_FA", "Airbag_02", 0),        # Seatbelt status, driver
      ("AB_Gurtschloss_BF", "Airbag_02", 0),        # Seatbelt status, passenger
      ("ESP_Fahrer_bremst", "ESP_05", 0),           # Brake pedal pressed
      ("ESP_Bremsdruck", "ESP_05", 0),              # Brake pressure applied
      ("MO_Fahrpedalrohwert_01", "Motor_20", 0),    # Accelerator pedal value
      ("EPS_Lenkmoment", "LH_EPS_03", 0),           # Absolute driver torque input
      ("EPS_VZ_Lenkmoment", "LH_EPS_03", 0),        # Driver torque input sign
      ("EPS_HCA_Status", "LH_EPS_03", 3),           # EPS HCA control status
      ("ESP_Tastung_passiv", "ESP_21", 0),          # Stability control disabled
      ("KBI_MFA_v_Einheit_02", "Einheiten_01", 0),  # MPH vs KMH speed display
      ("KBI_Handbremse", "Kombi_01", 0),            # Manual handbrake applied
      ("TSK_Status", "TSK_06", 0),                  # ACC engagement status from drivetrain coordinator
      ("GRA_Hauptschalter", "GRA_ACC_01", 0),       # ACC button, on/off
      ("GRA_Abbrechen", "GRA_ACC_01", 0),           # ACC button, cancel
      ("GRA_Tip_Setzen", "GRA_ACC_01", 0),          # ACC button, set
      ("GRA_Tip_Hoch", "GRA_ACC_01", 0),            # ACC button, increase or accel
      ("GRA_Tip_Runter", "GRA_ACC_01", 0),          # ACC button, decrease or decel
      ("GRA_Tip_Wiederaufnahme", "GRA_ACC_01", 0),  # ACC button, resume
      ("GRA_Verstellung_Zeitluecke", "GRA_ACC_01", 0),  # ACC button, time gap adj
      ("GRA_Typ_Hauptschalter", "GRA_ACC_01", 0),   # ACC main button type
      ("GRA_Tip_Stufe_2", "GRA_ACC_01", 0),         # unknown, related to stalk type
      ("GRA_ButtonTypeInfo", "GRA_ACC_01", 0),      # unknown, related to stalk type
      ("COUNTER", "GRA_ACC_01", 0),                 # GRA_ACC_01 CAN message counter
    ]

    checks = [
      # sig_address, frequency
      ("LWI_01", 100),      # From J500 Steering Assist with integrated sensors
      ("LH_EPS_03", 100),   # From J500 Steering Assist with integrated sensors
      ("ESP_19", 100),      # From J104 ABS/ESP controller
      ("ESP_05", 50),       # From J104 ABS/ESP controller
      ("ESP_21", 50),       # From J104 ABS/ESP controller
      ("Motor_20", 50),     # From J623 Engine control module
      ("TSK_06", 50),       # From J623 Engine control module
      ("ESP_02", 50),       # From J104 ABS/ESP controller
      ("GRA_ACC_01", 33),   # From J533 CAN gateway (via LIN from steering wheel controls)
      ("Gateway_72", 10),   # From J533 CAN gateway (aggregated data)
      ("Airbag_02", 5),     # From J234 Airbag control module
      ("Kombi_01", 2),      # From J285 Instrument cluster
      ("Blinkmodi_02", 1),  # From J519 BCM (sent at 1Hz when no lights active, 50Hz when active)
      ("Einheiten_01", 1),  # From J??? not known if gateway, cluster, or BCM
    ]

    if CP.transmissionType == TransmissionType.automatic:
      signals += [("GE_Fahrstufe", "Getriebe_11", 0)]  # Auto trans gear selector position
      checks += [("Getriebe_11", 20)]  # From J743 Auto transmission control module
    elif CP.transmissionType == TransmissionType.direct:
      signals += [("GearPosition", "EV_Gearshift", 0)]  # EV gear selector position
      checks += [("EV_Gearshift", 10)]  # From J??? unknown EV control module
    elif CP.transmissionType == TransmissionType.manual:
      signals += [("MO_Kuppl_schalter", "Motor_14", 0),  # Clutch switch
                  ("BCM1_Rueckfahrlicht_Schalter", "Gateway_72", 0)]  # Reverse light from BCM
      checks += [("Motor_14", 10)]  # From J623 Engine control module

    if CP.networkLocation == NetworkLocation.fwdCamera:
      # Radars are here on CANBUS.pt
      signals += MqbExtraSignals.fwd_radar_signals
      checks += MqbExtraSignals.fwd_radar_checks
      if CP.enableBsm:
        signals += MqbExtraSignals.bsm_radar_signals
        checks += MqbExtraSignals.bsm_radar_checks

    return CANParser(DBC_FILES.mqb, signals, checks, CANBUS.pt)

  @staticmethod
  def get_cam_can_parser(CP):
class MqbExtraSignals:
  # Additional signal and message lists for optional or bus-portable controllers
  fwd_radar_signals = [
    ("ACC_Wunschgeschw", "ACC_02", 0),           # ACC set speed
    ("AWV2_Freigabe", "ACC_10", 0),              # FCW brake jerk release
    ("ANB_Teilbremsung_Freigabe", "ACC_10", 0),  # AEB partial braking release
    ("ANB_Zielbremsung_Freigabe", "ACC_10", 0),  # AEB target braking release
  ]
  fwd_radar_checks = [
    ("ACC_10", 50),  # From J428 ACC radar control module
    ("ACC_02", 17),  # From J428 ACC radar control module
  ]
  bsm_radar_signals = [
    ("SWA_Infostufe_SWA_li", "SWA_01", 0),  # Blind spot object info, left
    ("SWA_Warnung_SWA_li", "SWA_01", 0),    # Blind spot object warning, left
    ("SWA_Infostufe_SWA_re", "SWA_01", 0),  # Blind spot object info, right
    ("SWA_Warnung_SWA_re", "SWA_01", 0),    # Blind spot object warning, right
  ]
  bsm_radar_checks = [
    ("SWA_01", 20),  # From J1086 Lane Change Assist
  ]
    signals = [
      # sig_name, sig_address, default
      ("LDW_SW_Warnung_links", "LDW_02", 0),   # Blind spot in warning mode on left side due to lane departure
      ("LDW_SW_Warnung_rechts", "LDW_02", 0),  # Blind spot in warning mode on right side due to lane departure
      ("LDW_Seite_DLCTLC", "LDW_02", 0),       # Direction of most likely lane departure (left or right)
      ("LDW_DLC", "LDW_02", 0),                # Lane departure, distance to line crossing
      ("LDW_TLC", "LDW_02", 0),                # Lane departure, time to line crossing
    ]

    checks = [
      # sig_address, frequency
      ("LDW_02", 10),  # From R242 Driver assistance camera
    ]

    if CP.networkLocation == NetworkLocation.gateway:
      # Radars are here on CANBUS.cam
      signals += MqbExtraSignals.fwd_radar_signals
      checks += MqbExtraSignals.fwd_radar_checks
      if CP.enableBsm:
        signals += MqbExtraSignals.bsm_radar_signals
        checks += MqbExtraSignals.bsm_radar_checks

    return CANParser(DBC_FILES.mqb, signals, checks, CANBUS.cam)
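Several MQB signals above carry magnitude and sign in two separate CAN fields, recombined in update() with the (1, -1)[sign] tuple-indexing idiom. A standalone sketch of that idiom, with made-up values:

def recombine(magnitude, sign_bit):
    # (1, -1)[0] == 1 and (1, -1)[1] == -1: the sign bit indexes the multiplier.
    return magnitude * (1, -1)[int(sign_bit)]

assert recombine(12.5, 0) == 12.5
assert recombine(12.5, 1) == -12.5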
test_fib.py
import time
import json
import logging
import tempfile
import random

from datetime import datetime

import pytest
import requests

from tests.common.fixtures.ptfhost_utils import run_icmp_responder        # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory   # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses      # lgtm[py/unused-import]
from tests.ptf_runner import ptf_runner
from tests.common.helpers.assertions import pytest_assert
from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_random_side
from tests.common.dualtor.mux_simulator_control import mux_server_url
from tests.common.utilities import is_ipv4_address

logger = logging.getLogger(__name__)

pytestmark = [
    pytest.mark.topology('any')
]

# Usually src-mac, dst-mac and vlan-id are optional hash keys. Not all platforms
# support these optional hash keys, so they are not enabled by default.
# HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'src-mac', 'dst-mac', 'ip-proto', 'vlan-id']
HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'ip-proto']
SRC_IP_RANGE = ['8.0.0.0', '8.255.255.255']
DST_IP_RANGE = ['9.0.0.0', '9.255.255.255']
SRC_IPV6_RANGE = ['20D0:A800:0:00::', '20D0:FFFF:0:00::FFFF']
DST_IPV6_RANGE = ['20D0:A800:0:01::', '20D0:FFFF:0:01::FFFF']
VLANIDS = range(1032, 1279)
VLANIP = '192.168.{}.1/24'
PTF_QLEN = 2000
DEFAULT_MUX_SERVER_PORT = 8080

PTF_TEST_PORT_MAP = '/root/ptf_test_port_map.json'


@pytest.fixture(scope='module')
def config_facts(duthosts):
    cfg_facts = {}
    for duthost in duthosts:
        cfg_facts[duthost.hostname] = []
        for asic in duthost.asics:
            if asic.is_it_backend():
                continue
            asic_cfg_facts = asic.config_facts(source='running')['ansible_facts']
            cfg_facts[duthost.hostname].append(asic_cfg_facts)
    return cfg_facts


@pytest.fixture(scope='module')
def minigraph_facts(duthosts, tbinfo):
    return duthosts.get_extended_minigraph_facts(tbinfo)


def get_t2_fib_info(duthosts, all_duts_cfg_facts, all_duts_mg_facts):
    """Get parsed FIB information from redis DB.

    Args:
        duthosts (SonicHosts): Objects for interacting with the DUTs.
        all_duts_cfg_facts (dict): Configuration facts keyed by hostname. For
            multi-asic platforms each value is a list of dicts.
        all_duts_mg_facts (dict): Minigraph facts keyed by hostname.

    Returns:
        dict: Map of prefix to PTF ports that are connected to DUT output ports.
            {
                '192.168.0.0/21': [],
                '192.168.8.0/25': [[58 59] [62 63] [66 67] [70 71]],
                '192.168.16.0/25': [[58 59] [62 63] [66 67] [70 71]],
                ...
                '20c0:c2e8:0:80::/64': [[58 59] [62 63] [66 67] [70 71]],
                '20c1:998::/64': [[58 59] [62 63] [66 67] [70 71]],
                ...
            }
    """
    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    fib_info = {}
    for dut_index, duthost in enumerate(duthosts.frontend_nodes):
        cfg_facts = all_duts_cfg_facts[duthost.hostname]
        mg_facts = all_duts_mg_facts[duthost.hostname]
        for asic_index, asic_cfg_facts in enumerate(cfg_facts):
            asic = duthost.asic_instance(asic_index)

            asic.shell("{} redis-dump -d 0 -k 'ROUTE*' -y > /tmp/fib.{}.txt".format(asic.ns_arg, timestamp))
            duthost.fetch(src="/tmp/fib.{}.txt".format(timestamp), dest="/tmp/fib")

            po = asic_cfg_facts.get('PORTCHANNEL', {})
            ports = asic_cfg_facts.get('PORT', {})

            with open("/tmp/fib/{}/tmp/fib.{}.txt".format(duthost.hostname, timestamp)) as fp:
                fib = json.load(fp)
                for k, v in fib.items():
                    skip = False

                    prefix = k.split(':', 1)[1]
                    ifnames = v['value']['ifname'].split(',')
                    nh = v['value']['nexthop']

                    oports = []
                    for ifname in ifnames:
                        if po.has_key(ifname):
                            # Ignore the prefix if its nexthop is not a frontend port.
                            if 'members' in po[ifname]:
                                if 'role' in ports[po[ifname]['members'][0]] and ports[po[ifname]['members'][0]]['role'] == 'Int':
                                    skip = True
                                else:
                                    oports.append([str(mg_facts['minigraph_ptf_indices'][x]) for x in po[ifname]['members']])
                        else:
                            if ports.has_key(ifname):
                                if 'role' in ports[ifname] and ports[ifname]['role'] == 'Int':
                                    skip = True
                                else:
                                    oports.append([str(mg_facts['minigraph_ptf_indices'][ifname])])
                            else:
                                logger.info("Route points to non-front-panel port {}:{}".format(k, v))
                                skip = True

                    # Skip directly attached subnets.
                    if nh == '0.0.0.0' or nh == '::' or nh == "":
                        skip = True

                    if not skip:
                        if prefix in fib_info:
                            fib_info[prefix] += oports
                        else:
                            fib_info[prefix] = oports
    return fib_info


def get_fib_info(duthost, cfg_facts, mg_facts):
    """Get parsed FIB information from redis DB.

    Args:
        duthost (SonicHost): Object for interacting with DUT.
        cfg_facts (dict): Configuration facts. For multi-asic platforms this
            will be a list of dicts.
        mg_facts (dict): Minigraph facts.

    Returns:
        dict: Map of prefix to PTF ports that are connected to DUT output ports.
            {
                '192.168.0.0/21': [],
                '192.168.8.0/25': [[58 59] [62 63] [66 67] [70 71]],
                '192.168.16.0/25': [[58 59] [62 63] [66 67] [70 71]],
                ...
                '20c0:c2e8:0:80::/64': [[58 59] [62 63] [66 67] [70 71]],
                '20c1:998::/64': [[58 59] [62 63] [66 67] [70 71]],
                ...
            }
    """
    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    fib_info = {}
    for asic_index, asic_cfg_facts in enumerate(cfg_facts):
        asic = duthost.asic_instance(asic_index)

        asic.shell("{} redis-dump -d 0 -k 'ROUTE*' -y > /tmp/fib.{}.txt".format(asic.ns_arg, timestamp))
        duthost.fetch(src="/tmp/fib.{}.txt".format(timestamp), dest="/tmp/fib")

        po = asic_cfg_facts.get('PORTCHANNEL', {})
        ports = asic_cfg_facts.get('PORT', {})
        sub_interfaces = asic_cfg_facts.get('VLAN_SUB_INTERFACE', {})

        with open("/tmp/fib/{}/tmp/fib.{}.txt".format(duthost.hostname, timestamp)) as fp:
            fib = json.load(fp)
            for k, v in fib.items():
                skip = False

                prefix = k.split(':', 1)[1]
                ifnames = v['value']['ifname'].split(',')
                nh = v['value']['nexthop']

                oports = []
                for ifname in ifnames:
                    if po.has_key(ifname):
                        # Ignore the prefix if its nexthop is not a frontend port.
                        if 'members' in po[ifname]:
                            if 'role' in ports[po[ifname]['members'][0]] and ports[po[ifname]['members'][0]]['role'] == 'Int':
                                skip = True
                            else:
                                oports.append([str(mg_facts['minigraph_ptf_indices'][x]) for x in po[ifname]['members']])
                    else:
                        if sub_interfaces.has_key(ifname):
                            oports.append([str(mg_facts['minigraph_ptf_indices'][ifname.split('.')[0]])])
                        elif ports.has_key(ifname):
                            if 'role' in ports[ifname] and ports[ifname]['role'] == 'Int':
                                skip = True
                            else:
                                oports.append([str(mg_facts['minigraph_ptf_indices'][ifname])])
                        else:
                            logger.info("Route points to non-front-panel port {}:{}".format(k, v))
                            skip = True

                # Skip directly attached subnets.
                if nh == '0.0.0.0' or nh == '::' or nh == "":
                    skip = True

                if not skip:
                    if prefix in fib_info:
                        fib_info[prefix] += oports
                    else:
                        fib_info[prefix] = oports
                # For single-asic devices, add an empty list for directly connected subnets.
                elif skip and not duthost.is_multi_asic:
                    fib_info[prefix] = []
    return fib_info


def gen_fib_info_file(ptfhost, fib_info, filename):
    tmp_fib_info = tempfile.NamedTemporaryFile()
    for prefix, oports in fib_info.items():
        tmp_fib_info.write(prefix)
        if oports:
            for op in oports:
                tmp_fib_info.write(' [{}]'.format(' '.join(op)))
        else:
            tmp_fib_info.write(' []')
        tmp_fib_info.write('\n')
    tmp_fib_info.flush()
    ptfhost.copy(src=tmp_fib_info.name, dest=filename)


@pytest.fixture(scope='function')
def fib_info_files(duthosts, ptfhost, config_facts, minigraph_facts, tbinfo, request):
    testname = request.node.name
    files = []
    if tbinfo['topo']['type'] != "t2":
        for dut_index, duthost in enumerate(duthosts):
            fib_info = get_fib_info(duthost, config_facts[duthost.hostname], minigraph_facts[duthost.hostname])
            if 'test_basic_fib' in testname and 'backend' in tbinfo['topo']['name']:
                # If it is a storage backend topology (bt0 or bt1) and the testcase is
                # test_basic_fib, add a default route as failover in the prefix matching.
                fib_info[u'0.0.0.0/0'] = []
                fib_info[u'::/0'] = []
            filename = '/root/fib_info_dut_{0}_{1}.txt'.format(testname, dut_index)
            gen_fib_info_file(ptfhost, fib_info, filename)
            files.append(filename)
    else:
        fib_info = get_t2_fib_info(duthosts, config_facts, minigraph_facts)
        filename = '/root/fib_info_all_duts.txt'
        gen_fib_info_file(ptfhost, fib_info, filename)
        files.append(filename)

    return files


@pytest.fixture(scope='module')
def disabled_ptf_ports(tbinfo):
    ports = set()
    for ptf_map in tbinfo['topo']['ptf_map_disabled'].values():
        for ptf_port_index in ptf_map.values():
            ports.add(ptf_port_index)
    return ports


@pytest.fixture(scope='module')
def vlan_ptf_ports(duthosts, config_facts, tbinfo):
    ports = set()
    for dut_index, duthost in enumerate(duthosts):
        for asic_config_fact in config_facts[duthost.hostname]:
            for vlan_members in asic_config_fact.get('VLAN_MEMBER', {}).values():
                for intf in vlan_members.keys():
                    dut_port_index = asic_config_fact['port_index_map'][intf]
                    ports.add(tbinfo['topo']['ptf_map'][str(dut_index)][str(dut_port_index)])
    return ports


@pytest.fixture(scope='module')
def router_macs(duthosts):
    mac_addresses = []
    for duthost in duthosts:
        mac_addresses.append(duthost.facts['router_mac'])
    return mac_addresses


# For dualtor
@pytest.fixture(scope='module')
def vlan_macs(duthosts, config_facts):
    mac_addresses = []
    for duthost in duthosts:
        dut_vlan_mac = None
        for asic_cfg_facts in config_facts[duthost.hostname]:
            for vlan in asic_cfg_facts.get('VLAN', {}).values():
                if 'mac' in vlan:
                    dut_vlan_mac = vlan['mac']
                    break
        if not dut_vlan_mac:
            dut_vlan_mac = duthost.facts['router_mac']
        mac_addresses.append(dut_vlan_mac)
    return mac_addresses


def set_mux_side(tbinfo, mux_server_url, side):
    if 'dualtor' in tbinfo['topo']['name']:
        res = requests.post(mux_server_url, json={"active_side": side})
        pytest_assert(res.status_code == 200, 'Failed to set active side: {}'.format(res.text))
        return res.json()   # Response is the new mux_status of all mux Y-cables.
    return {}


@pytest.fixture
def set_mux_random(tbinfo, mux_server_url):
    return set_mux_side(tbinfo, mux_server_url, 'random')


@pytest.fixture
def set_mux_same_side(tbinfo, mux_server_url):
    return set_mux_side(tbinfo, mux_server_url, random.choice(['upper_tor', 'lower_tor']))


def get_mux_status(tbinfo, mux_server_url):
    if 'dualtor' in tbinfo['topo']['name']:
        res = requests.get(mux_server_url)
        pytest_assert(res.status_code == 200, 'Failed to get mux status: {}'.format(res.text))
        return res.json()
    return {}


def ptf_test_port_map(ptfhost, tbinfo, mux_server_url, disabled_ptf_ports, vlan_ptf_ports, router_macs, vlan_macs):
    active_dut_map = {}
    for mux_status in get_mux_status(tbinfo, mux_server_url).values():
        active_dut_index = 0 if mux_status['active_side'] == 'upper_tor' else 1
        active_dut_map[str(mux_status['port_index'])] = active_dut_index

    logger.info('router_macs={}'.format(router_macs))
    logger.info('vlan_macs={}'.format(vlan_macs))
    logger.info('vlan_ptf_ports={}'.format(vlan_ptf_ports))
    logger.info('disabled_ptf_ports={}'.format(disabled_ptf_ports))
    logger.info('active_dut_map={}'.format(active_dut_map))

    ports_map = {}
    for ptf_port, dut_intf_map in tbinfo['topo']['ptf_dut_intf_map'].items():
        if int(ptf_port) in disabled_ptf_ports:
            continue
        target_dut_index = None
        target_mac = None
        if int(ptf_port) in vlan_ptf_ports:
            # PTF port connected to VLAN interface of DUT
            if active_dut_map:   # dualtor topology
                # If the PTF port is connected to a VLAN interface of the dualToR DUTs,
                # the PTF port index is the same as the DUT port index. Use this fact to
                # find the DUT index of the active side.
                target_dut_index = active_dut_map[ptf_port]
                target_mac = vlan_macs[target_dut_index]
        if target_dut_index is None:
            target_dut_index = int(dut_intf_map.keys()[0])   # Non-dualtor: the target DUT is always the first and only DUT
        if target_mac is None:
            target_mac = router_macs[target_dut_index]
        ports_map[ptf_port] = {'target_dut': target_dut_index, 'target_mac': target_mac}

    ptfhost.copy(content=json.dumps(ports_map), dest=PTF_TEST_PORT_MAP)
    return PTF_TEST_PORT_MAP


@pytest.fixture(scope="module")
def ignore_ttl(duthosts):
    # On multi-asic devices the packet can have a different ttl depending on how it is
    # routed within the device, so set this flag to mask the ttl in the ptf test.
    for duthost in duthosts:
        if duthost.sonichost.is_multi_asic:
            return True
    return False


@pytest.fixture(scope="module")
def single_fib_for_duts(tbinfo):
    # For a T2 topology, we generate a single fib file across all asics, but have
    # multiple frontend nodes (DUTs).
    if tbinfo['topo']['type'] == "t2":
        return True
    return False


@pytest.mark.parametrize("ipv4, ipv6, mtu", [pytest.param(True, True, 1514)])
def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu,
                   set_mux_random,
                   fib_info_files,
                   tbinfo, mux_server_url,
                   disabled_ptf_ports, vlan_ptf_ports,
                   router_macs, vlan_macs,
                   ignore_ttl, single_fib_for_duts):
    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

    # Do not test load balancing for the vs platform, as kernel 4.9
    # can only load balance based on L3.
    if duthosts[0].facts['asic_type'] in ["vs"]:
        test_balancing = False
    else:
        test_balancing = True

    logging.info("run ptf test")
    log_file = "/tmp/fib_test.FibTest.ipv4.{}.ipv6.{}.{}.log".format(ipv4, ipv6, timestamp)
    logging.info("PTF log file: %s" % log_file)
    ptf_runner(ptfhost,
               "ptftests",
               "fib_test.FibTest",
               platform_dir="ptftests",
               params={"fib_info_files": fib_info_files[:3],   # Test at most 3 DUTs
                       "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, mux_server_url,
                                                              disabled_ptf_ports, vlan_ptf_ports,
                                                              router_macs, vlan_macs),
                       "router_macs": router_macs,
                       "ipv4": ipv4,
                       "ipv6": ipv6,
                       "testbed_mtu": mtu,
                       "test_balancing": test_balancing,
                       "ignore_ttl": ignore_ttl,
                       "single_fib_for_duts": single_fib_for_duts},
               log_file=log_file,
               qlen=PTF_QLEN,
               socket_recv_size=16384)


def get_vlan_untag_ports(duthosts, config_facts):
    """Get all untagged vlan ports."""
    vlan_untag_ports = {}
    for duthost in duthosts:
        if duthost.is_multi_asic:
            continue
        ports = []
        vlans = config_facts.get('VLAN_INTERFACE', {}).keys()
        for vlan in vlans:
            vlan_member_info = config_facts[duthost.hostname].get('VLAN_MEMBER', {}).get(vlan, {})
            if vlan_member_info:
                for port_name, tag_mode in vlan_member_info.items():
                    if tag_mode['tagging_mode'] == 'untagged':
                        ports.append(port_name)
        vlan_untag_ports[duthost.hostname] = ports
    return vlan_untag_ports


@pytest.fixture(scope="module")
def hash_keys(duthost):
    hash_keys = HASH_KEYS[:]   # Copy from the global var to avoid side effects across iterations
    if 'dst-mac' in hash_keys:
        hash_keys.remove('dst-mac')

    # do not test load balancing on L4 port on vs platform as kernel 4.9
    if 'dst-port' in hash_keys:
        hash_keys.remove('dst-port')
    if duthost.facts['asic_type'] in ["mellanox"]:
        if 'ip-proto' in hash_keys:
            hash_keys.remove('ip-proto')
    if duthost.facts['asic_type'] in ["barefoot"]:
        if 'ingress-port' in hash_keys:
            hash_keys.remove('ingress-port')
    # Remove ingress-port and ip-proto from hash_keys; they are not supported by Marvell SAI.
    if duthost.facts['platform'] in ['armhf-nokia_ixs7215_52x-r0']:
        if 'ip-proto' in hash_keys:
            hash_keys.remove('ip-proto')
        if 'ingress-port' in hash_keys:
            hash_keys.remove('ingress-port')
    # Remove ingress-port on multi-asic platforms. Each asic has a different hash
    # seed, so the same packet arriving on different asics could egress out of
    # different ports, and the hash_test condition for hash_key == ingress-port
    # would fail.
    if duthost.sonichost.is_multi_asic:
        hash_keys.remove('ingress-port')

    return hash_keys


def configure_vlan(duthost, ports):
    for vlan in VLANIDS:
        duthost.shell('config vlan add {}'.format(vlan))
        for port in ports:
            duthost.shell('config vlan member add {} {}'.format(vlan, port))
        duthost.shell('config interface ip add Vlan{} '.format(vlan) + VLANIP.format(vlan % 256))
    time.sleep(5)


def unconfigure_vlan(duthost, ports):
    for vlan in VLANIDS:
        for port in ports:
            duthost.shell('config vlan member del {} {}'.format(vlan, port))
        duthost.shell('config interface ip remove Vlan{} '.format(vlan) + VLANIP.format(vlan % 256))
        duthost.shell('config vlan del {}'.format(vlan))
    time.sleep(5)


@pytest.fixture
def setup_vlan(tbinfo, duthosts, config_facts, hash_keys):
    vlan_untag_ports = get_vlan_untag_ports(duthosts, config_facts)
    need_to_clean_vlan = False

    # Add some vlans for the hash_key vlan-id test.
    if tbinfo['topo']['type'] == 't0' and 'dualtor' not in tbinfo['topo']['name'] and 'vlan-id' in hash_keys:
        for duthost in duthosts:
            configure_vlan(duthost, vlan_untag_ports[duthost.hostname])
        need_to_clean_vlan = True

    yield

    # Remove the added vlans.
    if need_to_clean_vlan:
        for duthost in duthosts:
            unconfigure_vlan(duthost, vlan_untag_ports[duthost.hostname])


@pytest.fixture(params=["ipv4", "ipv6"])
def ipver(request):
    return request.param


@pytest.fixture
def add_default_route_to_dut(config_facts, duthosts, tbinfo):
    """
    Add a default route to the device for storage backend testbed.
    This is to ensure the IO packets could be successfully directed.
    """
    if "backend" in tbinfo["topo"]["name"]:
        logging.info("Add default route on the DUT.")
        try:
            for duthost in duthosts:
                cfg_facts = config_facts[duthost.hostname]
                for asic_index, asic_cfg_facts in enumerate(cfg_facts):
                    asic = duthost.asic_instance(asic_index)
                    bgp_neighbors = asic_cfg_facts["BGP_NEIGHBOR"]
                    ipv4_cmd_parts = ["ip route add default"]
                    ipv6_cmd_parts = ["ip -6 route add default"]
                    for neighbor in bgp_neighbors.keys():
                        if is_ipv4_address(neighbor):
                            ipv4_cmd_parts.append("nexthop via %s" % neighbor)
                        else:
                            ipv6_cmd_parts.append("nexthop via %s" % neighbor)
                    ipv4_cmd_parts.sort()
                    ipv6_cmd_parts.sort()
                    # Limit to 4 nexthop entries.
                    ipv4_cmd = " ".join(ipv4_cmd_parts[:5])
                    ipv6_cmd = " ".join(ipv6_cmd_parts[:5])
                    asic.shell(ipv4_cmd)
                    asic.shell(ipv6_cmd)
            yield
        finally:
            logging.info("Remove default route on the DUT.")
            for duthost in duthosts:
                for asic in duthost.asics:
                    if asic.is_it_backend():
                        continue
                    asic.shell("ip route del default", module_ignore_errors=True)
                    asic.shell("ip -6 route del default", module_ignore_errors=True)
    else:
        yield


def test_hash(add_default_route_to_dut, fib_info_files, setup_vlan, hash_keys, ptfhost, ipver,
              set_mux_same_side,
              tbinfo, mux_server_url,
              disabled_ptf_ports, vlan_ptf_ports,
              router_macs, vlan_macs,
              ignore_ttl, single_fib_for_duts):
    timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    log_file = "/tmp/hash_test.HashTest.{}.{}.log".format(ipver, timestamp)
    logging.info("PTF log file: %s" % log_file)
    if ipver == "ipv4":
        src_ip_range = SRC_IP_RANGE
        dst_ip_range = DST_IP_RANGE
    else:
        src_ip_range = SRC_IPV6_RANGE
        dst_ip_range = DST_IPV6_RANGE
    ptf_runner(ptfhost,
               "ptftests",
               "hash_test.HashTest",
               platform_dir="ptftests",
               params={"fib_info_files": fib_info_files[:3],   # Test at most 3 DUTs
                       "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, mux_server_url,
                                                              disabled_ptf_ports, vlan_ptf_ports,
                                                              router_macs, vlan_macs),
                       "hash_keys": hash_keys,
                       "src_ip_range": ",".join(src_ip_range),
                       "dst_ip_range": ",".join(dst_ip_range),
                       "router_macs": router_macs,
                       "vlan_ids": VLANIDS,
                       "ignore_ttl": ignore_ttl,
                       "single_fib_for_duts": single_fib_for_duts},
               log_file=log_file,
               qlen=PTF_QLEN,
               socket_recv_size=16384)
    # can only load balance based on L3
    if duthost.facts['asic_type'] in ["vs"]:
        if 'src-port' in hash_keys:
            hash_keys.remove('src-port')
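gen_fib_info_file above emits one prefix per line followed by bracketed PTF port groups. A small offline illustration of the format it produces, with made-up prefixes and ports:

fib_info = {
    '192.168.8.0/25': [['58', '59'], ['62', '63']],
    '192.168.0.0/21': [],
}
for prefix, oports in fib_info.items():
    line = prefix
    if oports:
        for op in oports:
            line += ' [{}]'.format(' '.join(op))
    else:
        line += ' []'
    print(line)   # e.g. "192.168.8.0/25 [58 59] [62 63]"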
iaas_clients_suite_test.go
package iaas_clients

import (
    "testing"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

func TestIaasClients(t *testing.T)
{
    RegisterFailHandler(Fail)
    RunSpecs(t, "IaasClients Suite")
}
TicTacToe.py
# Implementation of a two-player Tic-Tac-Toe game in Python.

'''
We will make the board using a dictionary in which the keys are the locations
(i.e. top-left, mid-right, etc.) and initially the values are empty spaces;
after every move we change the value according to the player's choice of move.
'''

theBoard = {'7': ' ', '8': ' ', '9': ' ',
            '4': ' ', '5': ' ', '6': ' ',
            '1': ' ', '2': ' ', '3': ' '}

board_keys = []

for key in theBoard:
    board_keys.append(key)

'''
We have to print the updated board after every move in the game, so we define
the printBoard function to easily print the board every time by calling it.
'''

def printBoard(board):
    print(board['7'] + '|' + board['8'] + '|' + board['9'])
    print('-+-+-')
    print(board['4'] + '|' + board['5'] + '|' + board['6'])
    print('-+-+-')
    print(board['1'] + '|' + board['2'] + '|' + board['3'])

# Now we'll write the main function which has all the gameplay functionality.
def
():
    turn = 'X'
    count = 0

    for i in range(10):
        printBoard(theBoard)
        print("It's your turn, " + turn + ". Move to which place?")

        move = input()

        if theBoard[move] == ' ':
            theBoard[move] = turn
            count += 1
        else:
            print("That place is already filled.\nMove to which place?")
            continue

        # Now we will check if player X or O has won, for every move after 5 moves.
        if count >= 5:
            if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ':    # across the top
                printBoard(theBoard)
                print("\nGame Over.\n")
                print(" **** " + turn + " won. ****")
                break
            elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ':  # across the middle
                printBoard(theBoard)
                print("\nGame Over.\n")
                print(" **** " + turn + " won. ****")
                break
            elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ':  # across the bottom
                printBoard(theBoard)
                print("\nGame Over.\n")
                print(" **** " + turn + " won. ****")
                break
            elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ':  # down the left side
                printBoard(theBoard)
                print("\nGame Over.\n")
                print(" **** " + turn + " won. ****")
                break
            elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ':  # down the middle
                printBoard(theBoard)
                print("\nGame Over.\n")
                print(" **** " + turn + " won. ****")
                break
            elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ':  # down the right side
                printBoard(theBoard)
                print("\nGame Over.\n")
                print(" **** " + turn + " won. ****")
                break
            elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ':  # diagonal
                printBoard(theBoard)
                print("\nGame Over.\n")
                print(" **** " + turn + " won. ****")
                break
            elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ':  # diagonal
                printBoard(theBoard)
                print("\nGame Over.\n")
                print(" **** " + turn + " won. ****")
                break

        # If neither X nor O wins and the board is full, we'll declare the result a tie.
        if count == 9:
            print("\nGame Over.\n")
            print("It's a Tie!!")
            break  # board is full; without this break the loop keeps prompting for moves

        # Now we have to change the player after every move.
        if turn == 'X':
            turn = 'O'
        else:
            turn = 'X'

    # Now we will ask if the players want to restart the game or not.
    restart = input("Do you want to play again? (y/n) ")
    if restart == "y" or restart == "Y":
        for key in board_keys:
            theBoard[key] = " "
        game()

if __name__ == "__main__":
    game()
game
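The eight hard-coded win checks in game() can be collapsed into a loop over the winning triples. A minimal sketch of that refactor (the winner helper is illustrative, not part of the original script):

# All eight winning lines on the keypad-style board: rows, columns, diagonals.
WINS = [('7', '8', '9'), ('4', '5', '6'), ('1', '2', '3'),
        ('7', '4', '1'), ('8', '5', '2'), ('9', '6', '3'),
        ('7', '5', '3'), ('1', '5', '9')]

def winner(board):
    # Return the winning mark ('X' or 'O') if any line is filled by one player.
    for a, b, c in WINS:
        if board[a] == board[b] == board[c] != ' ':
            return board[a]
    return None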
test_assume_role_executor_factory.py
import pytest

from awsassume.assume_role_cache_executor import AssumeRoleCacheExecutor
from awsassume.assume_role_executor_factory import AssumeRoleExecutorFactory
from awsassume.assume_role_no_cache_executor import AssumeRoleNoCacheExecutor
from awsassume.data_models import CliArgs


@pytest.fixture(scope='module', params=[True, False])
def cli_args(request):
    return CliArgs(role_arn='arn:aws:iam::123456789012:role/rolename',
                   role_session_name='sessionname',
                   command='aws s3 ls',
                   region_name='ap-southeast-1',
                   no_cache=request.param)


def test_get_executor(cli_args):
    assume_role_executor = AssumeRoleExecutorFactory.get_executor(cli_args)
    if cli_args.no_cache is True:
        assert isinstance(assume_role_executor, AssumeRoleNoCacheExecutor) is True
    else:
assert isinstance(assume_role_executor, AssumeRoleCacheExecutor) is True
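The factory under test dispatches on the no_cache flag. A minimal, generic sketch of that pattern (class names here are hypothetical, not awsassume's actual implementation):

class NoCacheExecutor:
    pass

class CacheExecutor:
    pass

class ExecutorFactory:
    @staticmethod
    def get_executor(args):
        # Dispatch on the no_cache flag, mirroring what the test above asserts.
        if args.no_cache:
            return NoCacheExecutor()
        return CacheExecutor()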
wpmr.rs
#[doc = "Reader of register WPMR"]
pub type R = crate::R<u32, super::WPMR>;
#[doc = "Writer for register WPMR"]
pub type W = crate::W<u32, super::WPMR>;
#[doc = "Register WPMR `reset()`'s with value 0"]
impl crate::ResetValue for super::WPMR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `WPEN`"]
pub type WPEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `WPEN`"]
pub struct
<'a> {
    w: &'a mut W,
}
impl<'a> WPEN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `WPKEY`"]
pub type WPKEY_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `WPKEY`"]
pub struct WPKEY_W<'a> {
    w: &'a mut W,
}
impl<'a> WPKEY_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x00ff_ffff << 8)) | (((value as u32) & 0x00ff_ffff) << 8);
        self.w
    }
}
impl R {
    #[doc = "Bit 0 - Write Protection Enable"]
    #[inline(always)]
    pub fn wpen(&self) -> WPEN_R {
        WPEN_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bits 8:31 - SPI Write Protection Key Password"]
    #[inline(always)]
    pub fn wpkey(&self) -> WPKEY_R {
        WPKEY_R::new(((self.bits >> 8) & 0x00ff_ffff) as u32)
    }
}
impl W {
    #[doc = "Bit 0 - Write Protection Enable"]
    #[inline(always)]
    pub fn wpen(&mut self) -> WPEN_W {
        WPEN_W { w: self }
    }
    #[doc = "Bits 8:31 - SPI Write Protection Key Password"]
    #[inline(always)]
    pub fn wpkey(&mut self) -> WPKEY_W {
        WPKEY_W { w: self }
    }
}
WPEN_W
markdown_test.go
// Copyright 2018 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package markdown

import "testing"

func TestMarkdownWriter_String(t *testing.T) {
    md := Writer{}
    md.WriteLine("Foo")
    md.WriteSubheader("Sub-header2")
    md.WriteLine(getTable().String())
    result := md.String()
    expected := `# Title

## Top-level header

### Header1

#### Sub-header1

Foo

#### Sub-header2

| packages | api-versions |
| :--- | :--- |
| compute | 2020-06-01 |
| network | 2020-06-01 |
| resources | 2020-09-01 |
`
    if result != expected {
        t.Fatalf("expected %s, but got %s", expected, result)
    }
}

func getTable() *Table {
    table := NewTable("ll", "packages", "api-versions")
    table.AddRow("compute", "2020-06-01")
    table.AddRow("network", "2020-06-01")
    table.AddRow("resources", "2020-09-01")
    return table
}

func TestMarkdownTable_String(t *testing.T) {
    table := getTable()
    result := table.String()
    expected := `| packages | api-versions |
| :--- | :--- |
| compute | 2020-06-01 |
| network | 2020-06-01 |
| resources | 2020-09-01 |`
    if result != expected {
        t.Fatalf("expected %s, but got %s", expected, result)
    }
}
    md.WriteTitle("Title")
    md.WriteTopLevelHeader("Top-level header")
    md.WriteHeader("Header1")
    md.WriteSubheader("Sub-header1")
memkv_test.go
package memkv

import (
    "testing"

    "github.com/v2pro/quokka/kvstore"
    "github.com/stretchr/testify/require"
)

func Test_scan_metadata(t *testing.T) {
    ResetKVStore()
    should := require.New(t)
    kvstore.SetMetadata("server_1.2.3.4:8000", []byte("1.2.3.4:8000"))
    kvstore.SetMetadata("server_1.2.3.4:8001", []byte("1.2.3.4:8001"))
    iter, err := kvstore.ScanMetadata("server_", "server"+string([]byte{'_' + 1}))
    should.Nil(err)
    batch, err := iter()
    should.Nil(err)
    should.Equal("1.2.3.4:8000", string(batch[0].Value))
    should.Nil(batch)
}
    should.Equal("1.2.3.4:8001", string(batch[1].Value))
    batch, err = iter()
    should.Nil(err)
ITiming.py
# -*- coding: utf-8 -*-
"""
    pip_services_runtime.ITiming
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Callback interface to complete measuring time interval.

    :copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details.
    :license: MIT, see LICENSE for more details.
"""
    Callback interface to complete measuring time interval
    """

    def end_timing(self):
        """
        Completes measuring time interval and updates counter.

        Returns: None
        """
        # raise NotImplementedError('Method from interface definition')
        pass
class ITiming(object):
    """
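For illustration, a minimal concrete implementation of this interface: a timer captures a start time and end_timing reports the elapsed interval. The Timing class and its callback are assumptions for this sketch, not part of pip_services_runtime:

import time

class Timing(object):
    """Illustrative ITiming implementation: reports elapsed ms to a callback."""

    def __init__(self, on_elapsed):
        self._start = time.time()
        self._on_elapsed = on_elapsed

    def end_timing(self):
        elapsed_ms = (time.time() - self._start) * 1000.0
        self._on_elapsed(elapsed_ms)

# Usage: timing = Timing(lambda ms: print(ms)); ...do work...; timing.end_timing()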
a_posteriori.py
# coding=utf-8 # Author: Rafael Menelau Oliveira e Cruz <[email protected]> # # License: BSD 3 clause import numpy as np from deslib.dcs.base import BaseDCS class APosteriori(BaseDCS): """A Posteriori Dynamic classifier selection. The A Posteriori method uses the probability of correct classification of a given base classifier :math:`c_{i}` for each neighbor :math:`x_{k}` with respect to a single class. Consider a classifier :math:`c_{i}` that assigns a test sample to class :math:`w_{l}`. Then, only the samples belonging to class :math:`w_{l}` are taken into account during the competence level estimates. Base classifiers with a higher probability of correct classification have a higher competence level. Moreover, the method also weights the influence of each neighbor :math:`x_{k}` according to its Euclidean distance to the query sample. The closest neighbors have a higher influence on the competence level estimate. In cases where no sample in the region of competence belongs to the predicted class, :math:`w_{l}`, the competence level estimate of the base classifier is equal to zero. A single classifier is selected only if its competence level is significantly higher than that of the other base classifiers in the pool (higher than a pre-defined threshold). Otherwise, all classifiers in the pool are combined using the majority voting rule. The selection methodology can be modified by modifying the hyper-parameter selection_method. Parameters ---------- pool_classifiers : list of classifiers (Default = None) The generated_pool of classifiers trained for the corresponding classification problem. Each base classifiers should support the method "predict" and "predict_proba". If None, then the pool of classifiers is a bagging classifier. k : int (Default = 7) Number of neighbors used to estimate the competence of the base classifiers. DFP : Boolean (Default = False) Determines if the dynamic frienemy pruning is applied. with_IH : Boolean (Default = False) Whether the hardness level of the region of competence is used to decide between using the DS algorithm or the KNN for classification of a given query sample. safe_k : int (default = None) The size of the indecision region. IH_rate : float (default = 0.3) Hardness threshold. If the hardness level of the competence region is lower than the IH_rate the KNN classifier is used. Otherwise, the DS algorithm is used for classification. selection_method : String (Default = "best") Determines which method is used to select the base classifier after the competences are estimated. diff_thresh : float (Default = 0.1) Threshold to measure the difference between the competence level of the base classifiers for the random and diff selection schemes. If the difference is lower than the threshold, their performance are considered equivalent. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. knn_classifier : {'knn', 'faiss', None} (Default = 'knn') The algorithm used to estimate the region of competence: - 'knn' will use :class:`KNeighborsClassifier` from sklearn :class:`KNNE` available on `deslib.utils.knne` - 'faiss' will use Facebook's Faiss similarity search through the class :class:`FaissKNNClassifier` - None, will use sklearn :class:`KNeighborsClassifier`. 
knne : bool (Default=False) Whether to use K-Nearest Neighbor Equality (KNNE) for the region of competence estimation. DSEL_perc : float (Default = 0.5) Percentage of the input data used to fit DSEL. Note: This parameter is only used if the pool of classifier is None or unfitted. n_jobs : int, default=-1 The number of parallel jobs to run. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors. Doesn’t affect fit method. References ---------- G. Giacinto and F. Roli, Methods for Dynamic Classifier Selection 10th Int. Conf. on Image Anal. and Proc., Venice, Italy (1999), 659-664. Ko, Albert HR, Robert Sabourin, and Alceu Souza Britto Jr. "From dynamic classifier selection to dynamic ensemble selection." Pattern Recognition 41.5 (2008): 1718-1731. Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira. "Dynamic selection of classifiers—a comprehensive review." Pattern Recognition 47.11 (2014): 3665-3680. R. M. O. Cruz, R. Sabourin, and G. D. Cavalcanti, “Dynamic classifier
""" def __init__(self, pool_classifiers=None, k=7, DFP=False, with_IH=False, safe_k=None, IH_rate=0.30, selection_method='diff', diff_thresh=0.1, random_state=None, knn_classifier='knn', knne=False, DSEL_perc=0.5, n_jobs=-1): super(APosteriori, self).__init__(pool_classifiers=pool_classifiers, k=k, DFP=DFP, with_IH=with_IH, safe_k=safe_k, IH_rate=IH_rate, selection_method=selection_method, diff_thresh=diff_thresh, knn_classifier=knn_classifier, random_state=random_state, knne=knne, DSEL_perc=DSEL_perc, n_jobs=n_jobs) def fit(self, X, y): """Prepare the DS model by setting the KNN algorithm and pre-processing the information required to apply the DS method. Parameters ---------- X : array of shape (n_samples, n_features) Data used to fit the model. y : array of shape (n_samples) class labels of each example in X. Returns ------- self """ super(APosteriori, self).fit(X, y) self._check_predict_proba() self.dsel_scores_ = self._predict_proba_base(self.DSEL_data_) return self def estimate_competence(self, competence_region, distances, predictions=None): """Estimate the competence of each base classifier :math:`c_{i}` for the classification of the query sample using the A Posteriori method. The competence level is estimated based on the probability of correct classification of the base classifier :math:`c_{i}`, for each neighbor :math:`x_{k}` belonging to a specific class :math:`w_{l}`. In this case, :math:`w_{l}` is the class predicted by the base classifier :math:`c_{i}`, for the query sample. This method also weights the influence of each training sample according to its Euclidean distance to the query instance. The closest samples have a higher influence in the computation of the competence level. The competence level estimate is represented by the following equation: .. math:: \\delta_{i,j} = \\frac{\\sum_{\\mathbf{x}_{k} \\in \\omega_{l}}P(\\omega_{l} \\mid \\mathbf{x}_{k}, c_{i} )W_{k}} {\\sum_{k = 1}^{K}P(\\omega_{l} \\mid \\mathbf{x}_{k}, c_{i} )W_{k}} where :math:`\\delta_{i,j}` represents the competence level of :math:`c_{i}` for the classification of query. Parameters ---------- competence_region : array of shape (n_samples, n_neighbors) Indices of the k nearest neighbors. distances : array of shape (n_samples, n_neighbors) Distances from the k nearest neighbors to the query. predictions : array of shape (n_samples, n_classifiers) Predictions of the base classifiers for the test examples. Returns ------- competences : array of shape (n_samples, n_classifiers) Competence level estimated for each base classifier and test example. """ # Guarantee that these arrays are view as a 2D array for the case where # a single test sample is passed down. predictions = np.atleast_2d(predictions) distances[distances == 0] = 1e-10 # Normalize the distances dists_normalized = 1.0 / distances # Expanding the dimensions of the predictions and target arrays in # order to compare both. 
        predictions_3d = np.expand_dims(predictions, axis=1)
        target_3d = self.DSEL_target_[competence_region, np.newaxis]

        # Create a mask to remove the neighbors belonging to a different
        # class than the one predicted by the base classifier.
        mask = (predictions_3d != target_3d)

        # Broadcast the distance array to the same shape as the
        # pre-processed information for future calculations.
        dists_normalized = np.repeat(np.expand_dims(dists_normalized,
                                                    axis=2),
                                     self.n_classifiers_, axis=2)

        # Weight the score assigned by each base classifier to the correct
        # class of each neighbor by the corresponding distance weight.
        scores_target = self.dsel_scores_[competence_region, :,
                                          self.DSEL_target_[
                                              competence_region]]
        scores_target_norm = scores_target * dists_normalized

        # Create masked arrays to remove samples with a different label
        # from the calculations.
        masked_preprocessed = np.ma.MaskedArray(scores_target_norm,
                                                mask=mask)
        masked_dist = np.ma.MaskedArray(dists_normalized, mask=mask)

        # Consider only the neighbor samples where the neighbor label is
        # equal to the label predicted by the base classifier.
        competences_masked = np.ma.sum(masked_preprocessed,
                                       axis=1) / np.ma.sum(masked_dist,
                                                           axis=1)

        # Fill 0 for the masked values in the resulting array (when no
        # neighbor belongs to the class predicted by the corresponding
        # base classifier).
        competences = np.ma.filled(competences_masked, 0)

        return competences
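
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of DESlib): the masked weighted mean computed
# in `estimate_competence`, written out for a single query and a single base
# classifier c_i with K = 3 neighbors. All numbers are made up; `proba`
# stands for P(w_l | x_k, c_i) and `w` for the distance weights W_k.
# ---------------------------------------------------------------------------
import numpy as np

proba = np.array([0.9, 0.6, 0.8])     # c_i's score for each neighbor's class
w = 1.0 / np.array([0.5, 1.0, 2.0])   # inverse-distance weights W_k
same = np.array([True, False, True])  # neighbor label == class predicted
                                      # by c_i for the query

competence = np.sum(proba[same] * w[same]) / np.sum(w[same])
# (0.9 * 2.0 + 0.8 * 0.5) / (2.0 + 0.5) = 2.2 / 2.5 = 0.88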
selection: Recent advances and perspectives,” Information Fusion, vol. 41, pp. 195 – 216, 2018.
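
# ---------------------------------------------------------------------------
# Minimal usage sketch for the APosteriori class defined above. The dataset,
# the pool of classifiers, and the train/DSEL split are assumptions for
# illustration, not part of this file; the base classifiers must support
# predict_proba, since fit() calls _check_predict_proba().
# ---------------------------------------------------------------------------
from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=500, random_state=0)
X_train, X_dsel, y_train, y_dsel = train_test_split(X, y, test_size=0.5,
                                                    random_state=0)

# Fit the pool on one split; DSEL must be a separate split.
pool = BaggingClassifier(GaussianNB(), n_estimators=10,
                         random_state=0).fit(X_train, y_train)

ds = APosteriori(pool_classifiers=pool, k=7, random_state=0)
ds.fit(X_dsel, y_dsel)         # builds the region of competence on DSEL
print(ds.predict(X_dsel[:5]))  # dynamic classifier selection at test time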
tools.go
// +build tools generate //go:generate sh -c "GOBIN=$PWD/.gobincache go install $(sed -n 's/.*_ \"\\(.*\\)\".*/\\1/p' <$GOFILE)" package tools import ( _ "github.com/golangci/golangci-lint/cmd/golangci-lint"
)
estropada-navegation.component.spec.ts
import { ComponentFixture, TestBed, waitForAsync } from '@angular/core/testing';
import { RouterTestingModule } from '@angular/router/testing';
import { MatIconModule } from '@angular/material/icon';
import { MatButtonModule } from '@angular/material/button';

import { EstropadaNavegationComponent } from './estropada-navegation.component';
import { EstropadakNavegationService } from '../shared/estropadak-navegation.service';
import { EstropadaService } from '../shared/estropada.service';
import { EstropadaServiceStub } from '../shared/estropada.service.stub';

describe('EstropadaNavegationComponent', () => {
  let component: EstropadaNavegationComponent;
  let fixture: ComponentFixture<EstropadaNavegationComponent>;

  beforeEach(waitForAsync(() => {
    TestBed.configureTestingModule({
      imports: [
        MatIconModule,
        MatButtonModule,
        RouterTestingModule
      ],
      declarations: [ EstropadaNavegationComponent ],
      providers: [
        EstropadakNavegationService,
        {provide: EstropadaService, useClass: EstropadaServiceStub}
      ]
    })
    .compileComponents();
  }));

  beforeEach(() => {
    fixture = TestBed.createComponent(EstropadaNavegationComponent);
    component = fixture.componentInstance;
    const estropadakNavegationService = fixture.debugElement.injector.get(EstropadakNavegationService);
    estropadakNavegationService.estropadak = ['11', '22', '33', '44', '55'];
    component.estropadaId = '22';
    fixture.detectChanges();
  });

  it('should create', () => {
    expect(component).toBeTruthy();
  });

  it('should have a next id', () => {
    expect(component.next).toBe('33');
  });
expect(component.prev).toBe('11'); }); });
it('should have a prev id', () => {
model.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. /// <p>Validate exception field.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ValidationExceptionField { /// <p>Validate exception field name.</p> pub name: std::option::Option<std::string::String>, /// <p>Validate exception field message.</p> pub message: std::option::Option<std::string::String>, } impl ValidationExceptionField { /// <p>Validate exception field name.</p> pub fn name(&self) -> std::option::Option<&str> { self.name.as_deref() } /// <p>Validate exception field message.</p> pub fn message(&self) -> std::option::Option<&str> { self.message.as_deref() } } impl std::fmt::Debug for ValidationExceptionField { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ValidationExceptionField"); formatter.field("name", &self.name); formatter.field("message", &self.message); formatter.finish() } } /// See [`ValidationExceptionField`](crate::model::ValidationExceptionField) pub mod validation_exception_field { /// A builder for [`ValidationExceptionField`](crate::model::ValidationExceptionField) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, pub(crate) message: std::option::Option<std::string::String>, } impl Builder { /// <p>Validate exception field name.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } /// <p>Validate exception field name.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// <p>Validate exception field message.</p> pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } /// <p>Validate exception field message.</p> pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`ValidationExceptionField`](crate::model::ValidationExceptionField) pub fn build(self) -> crate::model::ValidationExceptionField { crate::model::ValidationExceptionField { name: self.name, message: self.message, } } } } impl ValidationExceptionField { /// Creates a new builder-style object to manufacture [`ValidationExceptionField`](crate::model::ValidationExceptionField) pub fn builder() -> crate::model::validation_exception_field::Builder { crate::model::validation_exception_field::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum ValidationExceptionReason { #[allow(missing_docs)] // documentation missing in model CannotParse, #[allow(missing_docs)] // documentation missing in model FieldValidationFailed, #[allow(missing_docs)] // documentation missing in model Other, #[allow(missing_docs)] // documentation missing in model UnknownOperation, /// Unknown contains new variants that have been added since this code was generated. 
Unknown(String), } impl std::convert::From<&str> for ValidationExceptionReason { fn from(s: &str) -> Self { match s { "cannotParse" => ValidationExceptionReason::CannotParse, "fieldValidationFailed" => ValidationExceptionReason::FieldValidationFailed, "other" => ValidationExceptionReason::Other, "unknownOperation" => ValidationExceptionReason::UnknownOperation, other => ValidationExceptionReason::Unknown(other.to_owned()), } } } impl std::str::FromStr for ValidationExceptionReason { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(ValidationExceptionReason::from(s)) } } impl ValidationExceptionReason { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { ValidationExceptionReason::CannotParse => "cannotParse", ValidationExceptionReason::FieldValidationFailed => "fieldValidationFailed", ValidationExceptionReason::Other => "other", ValidationExceptionReason::UnknownOperation => "unknownOperation", ValidationExceptionReason::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &[ "cannotParse", "fieldValidationFailed", "other", "unknownOperation", ] } } impl AsRef<str> for ValidationExceptionReason { fn as_ref(&self) -> &str { self.as_str() } } /// <p>A rule in the Point in Time (PIT) policy representing when to take snapshots and how long to retain them for.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct PitPolicyRule { /// <p>The ID of the rule.</p> pub rule_id: i64, /// <p>The units used to measure the interval and retentionDuration.</p> pub units: std::option::Option<crate::model::PitPolicyRuleUnits>, /// <p>How often, in the chosen units, a snapshot should be taken.</p> pub interval: i32, /// <p>The duration to retain a snapshot for, in the chosen units.</p> pub retention_duration: i32, /// <p>Whether this rule is enabled or not.</p> pub enabled: std::option::Option<bool>, } impl PitPolicyRule { /// <p>The ID of the rule.</p> pub fn rule_id(&self) -> i64 { self.rule_id } /// <p>The units used to measure the interval and retentionDuration.</p> pub fn units(&self) -> std::option::Option<&crate::model::PitPolicyRuleUnits> { self.units.as_ref() } /// <p>How often, in the chosen units, a snapshot should be taken.</p> pub fn interval(&self) -> i32 { self.interval } /// <p>The duration to retain a snapshot for, in the chosen units.</p> pub fn retention_duration(&self) -> i32 { self.retention_duration } /// <p>Whether this rule is enabled or not.</p> pub fn enabled(&self) -> std::option::Option<bool> { self.enabled } } impl std::fmt::Debug for PitPolicyRule { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("PitPolicyRule"); formatter.field("rule_id", &self.rule_id); formatter.field("units", &self.units); formatter.field("interval", &self.interval); formatter.field("retention_duration", &self.retention_duration); formatter.field("enabled", &self.enabled); formatter.finish() } } /// See [`PitPolicyRule`](crate::model::PitPolicyRule) pub mod pit_policy_rule { /// A builder for [`PitPolicyRule`](crate::model::PitPolicyRule) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) rule_id: std::option::Option<i64>, pub(crate) units: std::option::Option<crate::model::PitPolicyRuleUnits>, pub(crate) interval: std::option::Option<i32>, pub(crate) 
retention_duration: std::option::Option<i32>, pub(crate) enabled: std::option::Option<bool>, } impl Builder { /// <p>The ID of the rule.</p> pub fn rule_id(mut self, input: i64) -> Self { self.rule_id = Some(input); self } /// <p>The ID of the rule.</p> pub fn set_rule_id(mut self, input: std::option::Option<i64>) -> Self { self.rule_id = input; self } /// <p>The units used to measure the interval and retentionDuration.</p> pub fn units(mut self, input: crate::model::PitPolicyRuleUnits) -> Self { self.units = Some(input); self } /// <p>The units used to measure the interval and retentionDuration.</p> pub fn set_units( mut self, input: std::option::Option<crate::model::PitPolicyRuleUnits>, ) -> Self { self.units = input; self } /// <p>How often, in the chosen units, a snapshot should be taken.</p> pub fn interval(mut self, input: i32) -> Self { self.interval = Some(input); self } /// <p>How often, in the chosen units, a snapshot should be taken.</p> pub fn set_interval(mut self, input: std::option::Option<i32>) -> Self { self.interval = input; self } /// <p>The duration to retain a snapshot for, in the chosen units.</p> pub fn retention_duration(mut self, input: i32) -> Self { self.retention_duration = Some(input); self } /// <p>The duration to retain a snapshot for, in the chosen units.</p> pub fn set_retention_duration(mut self, input: std::option::Option<i32>) -> Self { self.retention_duration = input; self } /// <p>Whether this rule is enabled or not.</p> pub fn enabled(mut self, input: bool) -> Self { self.enabled = Some(input); self } /// <p>Whether this rule is enabled or not.</p> pub fn set_enabled(mut self, input: std::option::Option<bool>) -> Self { self.enabled = input; self } /// Consumes the builder and constructs a [`PitPolicyRule`](crate::model::PitPolicyRule) pub fn build(self) -> crate::model::PitPolicyRule { crate::model::PitPolicyRule { rule_id: self.rule_id.unwrap_or_default(), units: self.units, interval: self.interval.unwrap_or_default(), retention_duration: self.retention_duration.unwrap_or_default(), enabled: self.enabled, } } } } impl PitPolicyRule { /// Creates a new builder-style object to manufacture [`PitPolicyRule`](crate::model::PitPolicyRule) pub fn builder() -> crate::model::pit_policy_rule::Builder { crate::model::pit_policy_rule::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum PitPolicyRuleUnits { #[allow(missing_docs)] // documentation missing in model Day, #[allow(missing_docs)] // documentation missing in model Hour, #[allow(missing_docs)] // documentation missing in model Minute, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for PitPolicyRuleUnits { fn from(s: &str) -> Self { match s { "DAY" => PitPolicyRuleUnits::Day, "HOUR" => PitPolicyRuleUnits::Hour, "MINUTE" => PitPolicyRuleUnits::Minute, other => PitPolicyRuleUnits::Unknown(other.to_owned()), } } } impl std::str::FromStr for PitPolicyRuleUnits { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(PitPolicyRuleUnits::from(s)) } } impl PitPolicyRuleUnits { /// Returns the `&str` value of the enum member. 
pub fn as_str(&self) -> &str { match self { PitPolicyRuleUnits::Day => "DAY", PitPolicyRuleUnits::Hour => "HOUR", PitPolicyRuleUnits::Minute => "MINUTE", PitPolicyRuleUnits::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["DAY", "HOUR", "MINUTE"] } } impl AsRef<str> for PitPolicyRuleUnits { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum ReplicationConfigurationDataPlaneRouting { #[allow(missing_docs)] // documentation missing in model PrivateIp, #[allow(missing_docs)] // documentation missing in model PublicIp, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for ReplicationConfigurationDataPlaneRouting { fn from(s: &str) -> Self { match s { "PRIVATE_IP" => ReplicationConfigurationDataPlaneRouting::PrivateIp, "PUBLIC_IP" => ReplicationConfigurationDataPlaneRouting::PublicIp, other => ReplicationConfigurationDataPlaneRouting::Unknown(other.to_owned()), } } } impl std::str::FromStr for ReplicationConfigurationDataPlaneRouting { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(ReplicationConfigurationDataPlaneRouting::from(s)) } } impl ReplicationConfigurationDataPlaneRouting { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { ReplicationConfigurationDataPlaneRouting::PrivateIp => "PRIVATE_IP", ReplicationConfigurationDataPlaneRouting::PublicIp => "PUBLIC_IP", ReplicationConfigurationDataPlaneRouting::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["PRIVATE_IP", "PUBLIC_IP"] } } impl AsRef<str> for ReplicationConfigurationDataPlaneRouting { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum ReplicationConfigurationEbsEncryption { #[allow(missing_docs)] // documentation missing in model Custom, #[allow(missing_docs)] // documentation missing in model Default, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for ReplicationConfigurationEbsEncryption { fn from(s: &str) -> Self { match s { "CUSTOM" => ReplicationConfigurationEbsEncryption::Custom, "DEFAULT" => ReplicationConfigurationEbsEncryption::Default, other => ReplicationConfigurationEbsEncryption::Unknown(other.to_owned()), } } } impl std::str::FromStr for ReplicationConfigurationEbsEncryption { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(ReplicationConfigurationEbsEncryption::from(s)) } } impl ReplicationConfigurationEbsEncryption { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { ReplicationConfigurationEbsEncryption::Custom => "CUSTOM", ReplicationConfigurationEbsEncryption::Default => "DEFAULT", ReplicationConfigurationEbsEncryption::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
pub fn values() -> &'static [&'static str] { &["CUSTOM", "DEFAULT"] } } impl AsRef<str> for ReplicationConfigurationEbsEncryption { fn as_ref(&self) -> &str { self.as_str() } } /// <p>The configuration of a disk of the Source Server to be replicated.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ReplicationConfigurationReplicatedDisk { /// <p>The name of the device.</p> pub device_name: std::option::Option<std::string::String>, /// <p>Whether to boot from this disk or not.</p> pub is_boot_disk: std::option::Option<bool>, /// <p>The Staging Disk EBS volume type to be used during replication.</p> pub staging_disk_type: std::option::Option<crate::model::ReplicationConfigurationReplicatedDiskStagingDiskType>, /// <p>The requested number of I/O operations per second (IOPS).</p> pub iops: i64, /// <p>The throughput to use for the EBS volume in MiB/s. This parameter is valid only for gp3 volumes.</p> pub throughput: i64, } impl ReplicationConfigurationReplicatedDisk { /// <p>The name of the device.</p> pub fn device_name(&self) -> std::option::Option<&str> { self.device_name.as_deref() } /// <p>Whether to boot from this disk or not.</p> pub fn is_boot_disk(&self) -> std::option::Option<bool> { self.is_boot_disk } /// <p>The Staging Disk EBS volume type to be used during replication.</p> pub fn staging_disk_type( &self, ) -> std::option::Option<&crate::model::ReplicationConfigurationReplicatedDiskStagingDiskType> { self.staging_disk_type.as_ref() } /// <p>The requested number of I/O operations per second (IOPS).</p> pub fn iops(&self) -> i64 { self.iops } /// <p>The throughput to use for the EBS volume in MiB/s. This parameter is valid only for gp3 volumes.</p> pub fn throughput(&self) -> i64 { self.throughput } } impl std::fmt::Debug for ReplicationConfigurationReplicatedDisk { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ReplicationConfigurationReplicatedDisk"); formatter.field("device_name", &self.device_name); formatter.field("is_boot_disk", &self.is_boot_disk); formatter.field("staging_disk_type", &self.staging_disk_type); formatter.field("iops", &self.iops); formatter.field("throughput", &self.throughput); formatter.finish() } } /// See [`ReplicationConfigurationReplicatedDisk`](crate::model::ReplicationConfigurationReplicatedDisk) pub mod replication_configuration_replicated_disk { /// A builder for [`ReplicationConfigurationReplicatedDisk`](crate::model::ReplicationConfigurationReplicatedDisk) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) device_name: std::option::Option<std::string::String>, pub(crate) is_boot_disk: std::option::Option<bool>, pub(crate) staging_disk_type: std::option::Option< crate::model::ReplicationConfigurationReplicatedDiskStagingDiskType, >, pub(crate) iops: std::option::Option<i64>, pub(crate) throughput: std::option::Option<i64>, } impl Builder { /// <p>The name of the device.</p> pub fn device_name(mut self, input: impl Into<std::string::String>) -> Self { self.device_name = Some(input.into()); self } /// <p>The name of the device.</p> pub fn set_device_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.device_name = input; self } /// <p>Whether to boot from this disk or not.</p> pub fn is_boot_disk(mut self, input: bool) -> Self { self.is_boot_disk = Some(input); self } /// <p>Whether to boot from this disk or not.</p> pub fn set_is_boot_disk(mut 
self, input: std::option::Option<bool>) -> Self { self.is_boot_disk = input; self } /// <p>The Staging Disk EBS volume type to be used during replication.</p> pub fn staging_disk_type( mut self, input: crate::model::ReplicationConfigurationReplicatedDiskStagingDiskType, ) -> Self { self.staging_disk_type = Some(input); self } /// <p>The Staging Disk EBS volume type to be used during replication.</p> pub fn set_staging_disk_type( mut self, input: std::option::Option< crate::model::ReplicationConfigurationReplicatedDiskStagingDiskType, >, ) -> Self { self.staging_disk_type = input; self } /// <p>The requested number of I/O operations per second (IOPS).</p> pub fn iops(mut self, input: i64) -> Self { self.iops = Some(input); self } /// <p>The requested number of I/O operations per second (IOPS).</p> pub fn set_iops(mut self, input: std::option::Option<i64>) -> Self { self.iops = input; self } /// <p>The throughput to use for the EBS volume in MiB/s. This parameter is valid only for gp3 volumes.</p> pub fn throughput(mut self, input: i64) -> Self { self.throughput = Some(input); self } /// <p>The throughput to use for the EBS volume in MiB/s. This parameter is valid only for gp3 volumes.</p> pub fn set_throughput(mut self, input: std::option::Option<i64>) -> Self { self.throughput = input; self } /// Consumes the builder and constructs a [`ReplicationConfigurationReplicatedDisk`](crate::model::ReplicationConfigurationReplicatedDisk) pub fn build(self) -> crate::model::ReplicationConfigurationReplicatedDisk { crate::model::ReplicationConfigurationReplicatedDisk { device_name: self.device_name, is_boot_disk: self.is_boot_disk, staging_disk_type: self.staging_disk_type, iops: self.iops.unwrap_or_default(), throughput: self.throughput.unwrap_or_default(), } } } } impl ReplicationConfigurationReplicatedDisk { /// Creates a new builder-style object to manufacture [`ReplicationConfigurationReplicatedDisk`](crate::model::ReplicationConfigurationReplicatedDisk) pub fn builder() -> crate::model::replication_configuration_replicated_disk::Builder { crate::model::replication_configuration_replicated_disk::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum ReplicationConfigurationReplicatedDiskStagingDiskType { #[allow(missing_docs)] // documentation missing in model Auto, #[allow(missing_docs)] // documentation missing in model Gp2, #[allow(missing_docs)] // documentation missing in model Gp3, #[allow(missing_docs)] // documentation missing in model Io1, #[allow(missing_docs)] // documentation missing in model Sc1, #[allow(missing_docs)] // documentation missing in model St1, #[allow(missing_docs)] // documentation missing in model Standard, /// Unknown contains new variants that have been added since this code was generated. 
Unknown(String), } impl std::convert::From<&str> for ReplicationConfigurationReplicatedDiskStagingDiskType { fn from(s: &str) -> Self { match s { "AUTO" => ReplicationConfigurationReplicatedDiskStagingDiskType::Auto, "GP2" => ReplicationConfigurationReplicatedDiskStagingDiskType::Gp2, "GP3" => ReplicationConfigurationReplicatedDiskStagingDiskType::Gp3, "IO1" => ReplicationConfigurationReplicatedDiskStagingDiskType::Io1, "SC1" => ReplicationConfigurationReplicatedDiskStagingDiskType::Sc1, "ST1" => ReplicationConfigurationReplicatedDiskStagingDiskType::St1, "STANDARD" => ReplicationConfigurationReplicatedDiskStagingDiskType::Standard, other => { ReplicationConfigurationReplicatedDiskStagingDiskType::Unknown(other.to_owned()) } } } } impl std::str::FromStr for ReplicationConfigurationReplicatedDiskStagingDiskType { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(ReplicationConfigurationReplicatedDiskStagingDiskType::from( s, )) } } impl ReplicationConfigurationReplicatedDiskStagingDiskType { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { ReplicationConfigurationReplicatedDiskStagingDiskType::Auto => "AUTO", ReplicationConfigurationReplicatedDiskStagingDiskType::Gp2 => "GP2", ReplicationConfigurationReplicatedDiskStagingDiskType::Gp3 => "GP3", ReplicationConfigurationReplicatedDiskStagingDiskType::Io1 => "IO1", ReplicationConfigurationReplicatedDiskStagingDiskType::Sc1 => "SC1", ReplicationConfigurationReplicatedDiskStagingDiskType::St1 => "ST1", ReplicationConfigurationReplicatedDiskStagingDiskType::Standard => "STANDARD", ReplicationConfigurationReplicatedDiskStagingDiskType::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["AUTO", "GP2", "GP3", "IO1", "SC1", "ST1", "STANDARD"] } } impl AsRef<str> for ReplicationConfigurationReplicatedDiskStagingDiskType { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum ReplicationConfigurationDefaultLargeStagingDiskType { #[allow(missing_docs)] // documentation missing in model Gp2, #[allow(missing_docs)] // documentation missing in model Gp3, #[allow(missing_docs)] // documentation missing in model St1, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for ReplicationConfigurationDefaultLargeStagingDiskType { fn from(s: &str) -> Self { match s { "GP2" => ReplicationConfigurationDefaultLargeStagingDiskType::Gp2, "GP3" => ReplicationConfigurationDefaultLargeStagingDiskType::Gp3, "ST1" => ReplicationConfigurationDefaultLargeStagingDiskType::St1, other => ReplicationConfigurationDefaultLargeStagingDiskType::Unknown(other.to_owned()), } } } impl std::str::FromStr for ReplicationConfigurationDefaultLargeStagingDiskType { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(ReplicationConfigurationDefaultLargeStagingDiskType::from(s)) } } impl ReplicationConfigurationDefaultLargeStagingDiskType { /// Returns the `&str` value of the enum member. 
pub fn as_str(&self) -> &str { match self { ReplicationConfigurationDefaultLargeStagingDiskType::Gp2 => "GP2", ReplicationConfigurationDefaultLargeStagingDiskType::Gp3 => "GP3", ReplicationConfigurationDefaultLargeStagingDiskType::St1 => "ST1", ReplicationConfigurationDefaultLargeStagingDiskType::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["GP2", "GP3", "ST1"] } } impl AsRef<str> for ReplicationConfigurationDefaultLargeStagingDiskType { fn as_ref(&self) -> &str { self.as_str() } } /// <p>Configuration of a machine's license.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct Licensing { /// <p>Whether to enable "Bring your own license" or not.</p> pub os_byol: std::option::Option<bool>, } impl Licensing { /// <p>Whether to enable "Bring your own license" or not.</p> pub fn os_byol(&self) -> std::option::Option<bool> { self.os_byol } } impl std::fmt::Debug for Licensing { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("Licensing"); formatter.field("os_byol", &self.os_byol); formatter.finish() } } /// See [`Licensing`](crate::model::Licensing) pub mod licensing { /// A builder for [`Licensing`](crate::model::Licensing) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) os_byol: std::option::Option<bool>, } impl Builder { /// <p>Whether to enable "Bring your own license" or not.</p> pub fn os_byol(mut self, input: bool) -> Self { self.os_byol = Some(input); self } /// <p>Whether to enable "Bring your own license" or not.</p> pub fn set_os_byol(mut self, input: std::option::Option<bool>) -> Self { self.os_byol = input; self } /// Consumes the builder and constructs a [`Licensing`](crate::model::Licensing) pub fn build(self) -> crate::model::Licensing { crate::model::Licensing { os_byol: self.os_byol, } } } } impl Licensing { /// Creates a new builder-style object to manufacture [`Licensing`](crate::model::Licensing) pub fn builder() -> crate::model::licensing::Builder { crate::model::licensing::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum TargetInstanceTypeRightSizingMethod { #[allow(missing_docs)] // documentation missing in model Basic, #[allow(missing_docs)] // documentation missing in model None, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for TargetInstanceTypeRightSizingMethod { fn from(s: &str) -> Self { match s { "BASIC" => TargetInstanceTypeRightSizingMethod::Basic, "NONE" => TargetInstanceTypeRightSizingMethod::None, other => TargetInstanceTypeRightSizingMethod::Unknown(other.to_owned()), } } } impl std::str::FromStr for TargetInstanceTypeRightSizingMethod { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(TargetInstanceTypeRightSizingMethod::from(s)) } } impl TargetInstanceTypeRightSizingMethod { /// Returns the `&str` value of the enum member. 
pub fn as_str(&self) -> &str { match self { TargetInstanceTypeRightSizingMethod::Basic => "BASIC", TargetInstanceTypeRightSizingMethod::None => "NONE", TargetInstanceTypeRightSizingMethod::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["BASIC", "NONE"] } } impl AsRef<str> for TargetInstanceTypeRightSizingMethod { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum LaunchDisposition { #[allow(missing_docs)] // documentation missing in model Started, #[allow(missing_docs)] // documentation missing in model Stopped, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for LaunchDisposition { fn from(s: &str) -> Self { match s { "STARTED" => LaunchDisposition::Started, "STOPPED" => LaunchDisposition::Stopped, other => LaunchDisposition::Unknown(other.to_owned()), } } } impl std::str::FromStr for LaunchDisposition { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(LaunchDisposition::from(s)) } } impl LaunchDisposition { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { LaunchDisposition::Started => "STARTED", LaunchDisposition::Stopped => "STOPPED", LaunchDisposition::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["STARTED", "STOPPED"] } } impl AsRef<str> for LaunchDisposition { fn as_ref(&self) -> &str { self.as_str() } } /// <p>Properties of the Source Server machine.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct SourceProperties { /// <p>The date and time the Source Properties were last updated on.</p> pub last_updated_date_time: std::option::Option<std::string::String>, /// <p>The recommended EC2 instance type that will be used when recovering the Source Server.</p> pub recommended_instance_type: std::option::Option<std::string::String>, /// <p>Hints used to uniquely identify a machine.</p> pub identification_hints: std::option::Option<crate::model::IdentificationHints>, /// <p>An array of network interfaces.</p> pub network_interfaces: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>, /// <p>An array of disks.</p> pub disks: std::option::Option<std::vec::Vec<crate::model::Disk>>, /// <p>An array of CPUs.</p> pub cpus: std::option::Option<std::vec::Vec<crate::model::Cpu>>, /// <p>The amount of RAM in bytes.</p> pub ram_bytes: i64, /// <p>Operating system.</p> pub os: std::option::Option<crate::model::Os>, } impl SourceProperties { /// <p>The date and time the Source Properties were last updated on.</p> pub fn last_updated_date_time(&self) -> std::option::Option<&str> { self.last_updated_date_time.as_deref() } /// <p>The recommended EC2 instance type that will be used when recovering the Source Server.</p> pub fn recommended_instance_type(&self) -> std::option::Option<&str> { self.recommended_instance_type.as_deref() } /// <p>Hints used to uniquely identify a machine.</p> pub fn identification_hints(&self) -> std::option::Option<&crate::model::IdentificationHints> { self.identification_hints.as_ref() } /// <p>An array of network interfaces.</p> pub fn network_interfaces(&self) -> 
std::option::Option<&[crate::model::NetworkInterface]> { self.network_interfaces.as_deref() } /// <p>An array of disks.</p> pub fn disks(&self) -> std::option::Option<&[crate::model::Disk]> { self.disks.as_deref() } /// <p>An array of CPUs.</p> pub fn cpus(&self) -> std::option::Option<&[crate::model::Cpu]> { self.cpus.as_deref() } /// <p>The amount of RAM in bytes.</p> pub fn ram_bytes(&self) -> i64 { self.ram_bytes } /// <p>Operating system.</p> pub fn os(&self) -> std::option::Option<&crate::model::Os> { self.os.as_ref() } } impl std::fmt::Debug for SourceProperties { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("SourceProperties"); formatter.field("last_updated_date_time", &self.last_updated_date_time); formatter.field("recommended_instance_type", &self.recommended_instance_type); formatter.field("identification_hints", &self.identification_hints); formatter.field("network_interfaces", &self.network_interfaces); formatter.field("disks", &self.disks); formatter.field("cpus", &self.cpus); formatter.field("ram_bytes", &self.ram_bytes); formatter.field("os", &self.os); formatter.finish() } } /// See [`SourceProperties`](crate::model::SourceProperties) pub mod source_properties { /// A builder for [`SourceProperties`](crate::model::SourceProperties) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) last_updated_date_time: std::option::Option<std::string::String>, pub(crate) recommended_instance_type: std::option::Option<std::string::String>, pub(crate) identification_hints: std::option::Option<crate::model::IdentificationHints>, pub(crate) network_interfaces: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>, pub(crate) disks: std::option::Option<std::vec::Vec<crate::model::Disk>>, pub(crate) cpus: std::option::Option<std::vec::Vec<crate::model::Cpu>>, pub(crate) ram_bytes: std::option::Option<i64>, pub(crate) os: std::option::Option<crate::model::Os>, } impl Builder { /// <p>The date and time the Source Properties were last updated on.</p> pub fn last_updated_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.last_updated_date_time = Some(input.into()); self } /// <p>The date and time the Source Properties were last updated on.</p> pub fn set_last_updated_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.last_updated_date_time = input; self } /// <p>The recommended EC2 instance type that will be used when recovering the Source Server.</p> pub fn recommended_instance_type(mut self, input: impl Into<std::string::String>) -> Self { self.recommended_instance_type = Some(input.into()); self } /// <p>The recommended EC2 instance type that will be used when recovering the Source Server.</p> pub fn set_recommended_instance_type( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.recommended_instance_type = input; self } /// <p>Hints used to uniquely identify a machine.</p> pub fn identification_hints(mut self, input: crate::model::IdentificationHints) -> Self { self.identification_hints = Some(input); self } /// <p>Hints used to uniquely identify a machine.</p> pub fn set_identification_hints( mut self, input: std::option::Option<crate::model::IdentificationHints>, ) -> Self { self.identification_hints = input; self } /// Appends an item to `network_interfaces`. 
/// /// To override the contents of this collection use [`set_network_interfaces`](Self::set_network_interfaces). /// /// <p>An array of network interfaces.</p> pub fn network_interfaces(mut self, input: crate::model::NetworkInterface) -> Self { let mut v = self.network_interfaces.unwrap_or_default(); v.push(input); self.network_interfaces = Some(v); self } /// <p>An array of network interfaces.</p> pub fn set_network_interfaces( mut self, input: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>, ) -> Self { self.network_interfaces = input; self } /// Appends an item to `disks`. /// /// To override the contents of this collection use [`set_disks`](Self::set_disks). /// /// <p>An array of disks.</p> pub fn disks(mut self, input: crate::model::Disk) -> Self { let mut v = self.disks.unwrap_or_default(); v.push(input); self.disks = Some(v); self } /// <p>An array of disks.</p> pub fn set_disks( mut self, input: std::option::Option<std::vec::Vec<crate::model::Disk>>, ) -> Self { self.disks = input; self } /// Appends an item to `cpus`. /// /// To override the contents of this collection use [`set_cpus`](Self::set_cpus). /// /// <p>An array of CPUs.</p> pub fn cpus(mut self, input: crate::model::Cpu) -> Self { let mut v = self.cpus.unwrap_or_default(); v.push(input); self.cpus = Some(v); self } /// <p>An array of CPUs.</p> pub fn set_cpus( mut self, input: std::option::Option<std::vec::Vec<crate::model::Cpu>>, ) -> Self { self.cpus = input; self } /// <p>The amount of RAM in bytes.</p> pub fn ram_bytes(mut self, input: i64) -> Self { self.ram_bytes = Some(input); self } /// <p>The amount of RAM in bytes.</p> pub fn set_ram_bytes(mut self, input: std::option::Option<i64>) -> Self { self.ram_bytes = input; self } /// <p>Operating system.</p> pub fn os(mut self, input: crate::model::Os) -> Self { self.os = Some(input); self } /// <p>Operating system.</p> pub fn set_os(mut self, input: std::option::Option<crate::model::Os>) -> Self { self.os = input; self } /// Consumes the builder and constructs a [`SourceProperties`](crate::model::SourceProperties) pub fn build(self) -> crate::model::SourceProperties { crate::model::SourceProperties { last_updated_date_time: self.last_updated_date_time, recommended_instance_type: self.recommended_instance_type, identification_hints: self.identification_hints, network_interfaces: self.network_interfaces, disks: self.disks, cpus: self.cpus, ram_bytes: self.ram_bytes.unwrap_or_default(), os: self.os, } } } } impl SourceProperties { /// Creates a new builder-style object to manufacture [`SourceProperties`](crate::model::SourceProperties) pub fn builder() -> crate::model::source_properties::Builder { crate::model::source_properties::Builder::default() } } /// <p>Operating System.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct Os { /// <p>The long name of the Operating System.</p> pub full_string: std::option::Option<std::string::String>, } impl Os { /// <p>The long name of the Operating System.</p> pub fn full_string(&self) -> std::option::Option<&str> { self.full_string.as_deref() } } impl std::fmt::Debug for Os { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("Os"); formatter.field("full_string", &self.full_string); formatter.finish() } } /// See [`Os`](crate::model::Os) pub mod os { /// A builder for [`Os`](crate::model::Os) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { 
pub(crate) full_string: std::option::Option<std::string::String>, } impl Builder { /// <p>The long name of the Operating System.</p> pub fn full_string(mut self, input: impl Into<std::string::String>) -> Self { self.full_string = Some(input.into()); self } /// <p>The long name of the Operating System.</p> pub fn set_full_string(mut self, input: std::option::Option<std::string::String>) -> Self { self.full_string = input; self } /// Consumes the builder and constructs a [`Os`](crate::model::Os) pub fn build(self) -> crate::model::Os { crate::model::Os { full_string: self.full_string, } } } } impl Os { /// Creates a new builder-style object to manufacture [`Os`](crate::model::Os) pub fn builder() -> crate::model::os::Builder { crate::model::os::Builder::default() } } /// <p>Information about a server's CPU.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct Cpu { /// <p>The number of CPU cores.</p> pub cores: i64, /// <p>The model name of the CPU.</p> pub model_name: std::option::Option<std::string::String>, } impl Cpu { /// <p>The number of CPU cores.</p> pub fn cores(&self) -> i64 { self.cores } /// <p>The model name of the CPU.</p> pub fn model_name(&self) -> std::option::Option<&str> { self.model_name.as_deref() } } impl std::fmt::Debug for Cpu { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("Cpu"); formatter.field("cores", &self.cores); formatter.field("model_name", &self.model_name); formatter.finish() } } /// See [`Cpu`](crate::model::Cpu) pub mod cpu { /// A builder for [`Cpu`](crate::model::Cpu) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cores: std::option::Option<i64>, pub(crate) model_name: std::option::Option<std::string::String>, } impl Builder { /// <p>The number of CPU cores.</p> pub fn cores(mut self, input: i64) -> Self { self.cores = Some(input); self } /// <p>The number of CPU cores.</p> pub fn set_cores(mut self, input: std::option::Option<i64>) -> Self { self.cores = input; self } /// <p>The model name of the CPU.</p> pub fn model_name(mut self, input: impl Into<std::string::String>) -> Self { self.model_name = Some(input.into()); self } /// <p>The model name of the CPU.</p> pub fn set_model_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.model_name = input; self } /// Consumes the builder and constructs a [`Cpu`](crate::model::Cpu) pub fn build(self) -> crate::model::Cpu { crate::model::Cpu { cores: self.cores.unwrap_or_default(), model_name: self.model_name, } } } } impl Cpu { /// Creates a new builder-style object to manufacture [`Cpu`](crate::model::Cpu) pub fn builder() -> crate::model::cpu::Builder { crate::model::cpu::Builder::default() } } /// <p>An object representing a data storage device on a server.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct Disk { /// <p>The disk or device name.</p> pub device_name: std::option::Option<std::string::String>, /// <p>The amount of storage on the disk in bytes.</p> pub bytes: i64, } impl Disk { /// <p>The disk or device name.</p> pub fn device_name(&self) -> std::option::Option<&str> { self.device_name.as_deref() } /// <p>The amount of storage on the disk in bytes.</p> pub fn bytes(&self) -> i64 { self.bytes } } impl std::fmt::Debug for Disk { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("Disk"); 
formatter.field("device_name", &self.device_name); formatter.field("bytes", &self.bytes); formatter.finish() } } /// See [`Disk`](crate::model::Disk) pub mod disk { /// A builder for [`Disk`](crate::model::Disk) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) device_name: std::option::Option<std::string::String>, pub(crate) bytes: std::option::Option<i64>, } impl Builder { /// <p>The disk or device name.</p> pub fn device_name(mut self, input: impl Into<std::string::String>) -> Self { self.device_name = Some(input.into()); self } /// <p>The disk or device name.</p> pub fn set_device_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.device_name = input; self } /// <p>The amount of storage on the disk in bytes.</p> pub fn bytes(mut self, input: i64) -> Self { self.bytes = Some(input); self } /// <p>The amount of storage on the disk in bytes.</p> pub fn set_bytes(mut self, input: std::option::Option<i64>) -> Self { self.bytes = input; self } /// Consumes the builder and constructs a [`Disk`](crate::model::Disk) pub fn build(self) -> crate::model::Disk { crate::model::Disk { device_name: self.device_name, bytes: self.bytes.unwrap_or_default(), } } } } impl Disk { /// Creates a new builder-style object to manufacture [`Disk`](crate::model::Disk) pub fn builder() -> crate::model::disk::Builder { crate::model::disk::Builder::default() } } /// <p>Network interface.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct NetworkInterface { /// <p>The MAC address of the network interface.</p> pub mac_address: std::option::Option<std::string::String>, /// <p>Network interface IPs.</p> pub ips: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>Whether this is the primary network interface.</p> pub is_primary: std::option::Option<bool>, } impl NetworkInterface { /// <p>The MAC address of the network interface.</p> pub fn mac_address(&self) -> std::option::Option<&str> { self.mac_address.as_deref() } /// <p>Network interface IPs.</p> pub fn ips(&self) -> std::option::Option<&[std::string::String]> { self.ips.as_deref() } /// <p>Whether this is the primary network interface.</p> pub fn is_primary(&self) -> std::option::Option<bool> { self.is_primary } } impl std::fmt::Debug for NetworkInterface { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("NetworkInterface"); formatter.field("mac_address", &self.mac_address); formatter.field("ips", &self.ips); formatter.field("is_primary", &self.is_primary); formatter.finish() } } /// See [`NetworkInterface`](crate::model::NetworkInterface) pub mod network_interface { /// A builder for [`NetworkInterface`](crate::model::NetworkInterface) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) mac_address: std::option::Option<std::string::String>, pub(crate) ips: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) is_primary: std::option::Option<bool>, } impl Builder { /// <p>The MAC address of the network interface.</p> pub fn mac_address(mut self, input: impl Into<std::string::String>) -> Self { self.mac_address = Some(input.into()); self } /// <p>The MAC address of the network interface.</p> pub fn set_mac_address(mut self, input: std::option::Option<std::string::String>) -> Self { self.mac_address = input; self } /// Appends an item to `ips`. 
/// /// To override the contents of this collection use [`set_ips`](Self::set_ips). /// /// <p>Network interface IPs.</p> pub fn ips(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.ips.unwrap_or_default(); v.push(input.into()); self.ips = Some(v); self } /// <p>Network interface IPs.</p> pub fn set_ips( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.ips = input; self } /// <p>Whether this is the primary network interface.</p> pub fn is_primary(mut self, input: bool) -> Self { self.is_primary = Some(input); self } /// <p>Whether this is the primary network interface.</p> pub fn set_is_primary(mut self, input: std::option::Option<bool>) -> Self { self.is_primary = input; self } /// Consumes the builder and constructs a [`NetworkInterface`](crate::model::NetworkInterface) pub fn build(self) -> crate::model::NetworkInterface { crate::model::NetworkInterface { mac_address: self.mac_address, ips: self.ips, is_primary: self.is_primary, } } } } impl NetworkInterface { /// Creates a new builder-style object to manufacture [`NetworkInterface`](crate::model::NetworkInterface) pub fn builder() -> crate::model::network_interface::Builder { crate::model::network_interface::Builder::default() } } /// <p>Hints used to uniquely identify a machine.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct IdentificationHints { /// <p>Fully Qualified Domain Name identification hint.</p> pub fqdn: std::option::Option<std::string::String>, /// <p>Hostname identification hint.</p> pub hostname: std::option::Option<std::string::String>, /// <p>vCenter VM path identification hint.</p> pub vm_ware_uuid: std::option::Option<std::string::String>, /// <p>AWS Instance ID identification hint.</p> pub aws_instance_id: std::option::Option<std::string::String>, } impl IdentificationHints { /// <p>Fully Qualified Domain Name identification hint.</p> pub fn fqdn(&self) -> std::option::Option<&str> { self.fqdn.as_deref() } /// <p>Hostname identification hint.</p> pub fn hostname(&self) -> std::option::Option<&str> { self.hostname.as_deref() } /// <p>vCenter VM path identification hint.</p> pub fn vm_ware_uuid(&self) -> std::option::Option<&str> { self.vm_ware_uuid.as_deref() } /// <p>AWS Instance ID identification hint.</p> pub fn aws_instance_id(&self) -> std::option::Option<&str> { self.aws_instance_id.as_deref() } } impl std::fmt::Debug for IdentificationHints { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("IdentificationHints"); formatter.field("fqdn", &self.fqdn); formatter.field("hostname", &self.hostname); formatter.field("vm_ware_uuid", &self.vm_ware_uuid); formatter.field("aws_instance_id", &self.aws_instance_id); formatter.finish() } } /// See [`IdentificationHints`](crate::model::IdentificationHints) pub mod identification_hints { /// A builder for [`IdentificationHints`](crate::model::IdentificationHints) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) fqdn: std::option::Option<std::string::String>, pub(crate) hostname: std::option::Option<std::string::String>, pub(crate) vm_ware_uuid: std::option::Option<std::string::String>, pub(crate) aws_instance_id: std::option::Option<std::string::String>, } impl Builder { /// <p>Fully Qualified Domain Name identification hint.</p> pub fn fqdn(mut self, input: impl Into<std::string::String>) -> Self { self.fqdn = 
Some(input.into()); self } /// <p>Fully Qualified Domain Name identification hint.</p> pub fn set_fqdn(mut self, input: std::option::Option<std::string::String>) -> Self { self.fqdn = input; self } /// <p>Hostname identification hint.</p> pub fn hostname(mut self, input: impl Into<std::string::String>) -> Self { self.hostname = Some(input.into()); self } /// <p>Hostname identification hint.</p> pub fn set_hostname(mut self, input: std::option::Option<std::string::String>) -> Self { self.hostname = input; self } /// <p>vCenter VM path identification hint.</p> pub fn vm_ware_uuid(mut self, input: impl Into<std::string::String>) -> Self { self.vm_ware_uuid = Some(input.into()); self } /// <p>vCenter VM path identification hint.</p> pub fn set_vm_ware_uuid(mut self, input: std::option::Option<std::string::String>) -> Self { self.vm_ware_uuid = input; self } /// <p>AWS Instance ID identification hint.</p> pub fn aws_instance_id(mut self, input: impl Into<std::string::String>) -> Self { self.aws_instance_id = Some(input.into()); self } /// <p>AWS Instance ID identification hint.</p> pub fn set_aws_instance_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.aws_instance_id = input; self } /// Consumes the builder and constructs a [`IdentificationHints`](crate::model::IdentificationHints) pub fn build(self) -> crate::model::IdentificationHints { crate::model::IdentificationHints { fqdn: self.fqdn, hostname: self.hostname, vm_ware_uuid: self.vm_ware_uuid, aws_instance_id: self.aws_instance_id, } } } } impl IdentificationHints { /// Creates a new builder-style object to manufacture [`IdentificationHints`](crate::model::IdentificationHints) pub fn builder() -> crate::model::identification_hints::Builder { crate::model::identification_hints::Builder::default() } } /// <p>An object representing the Source Server Lifecycle.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct LifeCycle { /// <p>The date and time of when the Source Server was added to the service.</p> pub added_to_service_date_time: std::option::Option<std::string::String>, /// <p>The date and time of the first byte that was replicated from the Source Server.</p> pub first_byte_date_time: std::option::Option<std::string::String>, /// <p>The amount of time that the Source Server has been replicating for.</p> pub elapsed_replication_duration: std::option::Option<std::string::String>, /// <p>The date and time this Source Server was last seen by the service.</p> pub last_seen_by_service_date_time: std::option::Option<std::string::String>, /// <p>An object containing information regarding the last launch of the Source Server.</p> pub last_launch: std::option::Option<crate::model::LifeCycleLastLaunch>, } impl LifeCycle { /// <p>The date and time of when the Source Server was added to the service.</p> pub fn added_to_service_date_time(&self) -> std::option::Option<&str> { self.added_to_service_date_time.as_deref() } /// <p>The date and time of the first byte that was replicated from the Source Server.</p> pub fn first_byte_date_time(&self) -> std::option::Option<&str> { self.first_byte_date_time.as_deref() } /// <p>The amount of time that the Source Server has been replicating for.</p> pub fn elapsed_replication_duration(&self) -> std::option::Option<&str> { self.elapsed_replication_duration.as_deref() } /// <p>The date and time this Source Server was last seen by the service.</p> pub fn last_seen_by_service_date_time(&self) -> std::option::Option<&str> { 
self.last_seen_by_service_date_time.as_deref() } /// <p>An object containing information regarding the last launch of the Source Server.</p> pub fn last_launch(&self) -> std::option::Option<&crate::model::LifeCycleLastLaunch> { self.last_launch.as_ref() } } impl std::fmt::Debug for LifeCycle { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("LifeCycle"); formatter.field( "added_to_service_date_time", &self.added_to_service_date_time, ); formatter.field("first_byte_date_time", &self.first_byte_date_time); formatter.field( "elapsed_replication_duration", &self.elapsed_replication_duration, ); formatter.field( "last_seen_by_service_date_time", &self.last_seen_by_service_date_time, ); formatter.field("last_launch", &self.last_launch); formatter.finish() } } /// See [`LifeCycle`](crate::model::LifeCycle) pub mod life_cycle { /// A builder for [`LifeCycle`](crate::model::LifeCycle) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) added_to_service_date_time: std::option::Option<std::string::String>, pub(crate) first_byte_date_time: std::option::Option<std::string::String>, pub(crate) elapsed_replication_duration: std::option::Option<std::string::String>, pub(crate) last_seen_by_service_date_time: std::option::Option<std::string::String>, pub(crate) last_launch: std::option::Option<crate::model::LifeCycleLastLaunch>, } impl Builder { /// <p>The date and time of when the Source Server was added to the service.</p> pub fn added_to_service_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.added_to_service_date_time = Some(input.into()); self } /// <p>The date and time of when the Source Server was added to the service.</p> pub fn set_added_to_service_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.added_to_service_date_time = input; self } /// <p>The date and time of the first byte that was replicated from the Source Server.</p> pub fn first_byte_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.first_byte_date_time = Some(input.into()); self } /// <p>The date and time of the first byte that was replicated from the Source Server.</p> pub fn set_first_byte_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.first_byte_date_time = input; self } /// <p>The amount of time that the Source Server has been replicating for.</p> pub fn elapsed_replication_duration( mut self, input: impl Into<std::string::String>, ) -> Self { self.elapsed_replication_duration = Some(input.into()); self } /// <p>The amount of time that the Source Server has been replicating for.</p> pub fn set_elapsed_replication_duration( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.elapsed_replication_duration = input; self } /// <p>The date and time this Source Server was last seen by the service.</p> pub fn last_seen_by_service_date_time( mut self, input: impl Into<std::string::String>, ) -> Self { self.last_seen_by_service_date_time = Some(input.into()); self } /// <p>The date and time this Source Server was last seen by the service.</p> pub fn set_last_seen_by_service_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.last_seen_by_service_date_time = input; self } /// <p>An object containing information regarding the last launch of the Source Server.</p> pub fn last_launch(mut self, input: 
crate::model::LifeCycleLastLaunch) -> Self { self.last_launch = Some(input); self } /// <p>An object containing information regarding the last launch of the Source Server.</p> pub fn set_last_launch( mut self, input: std::option::Option<crate::model::LifeCycleLastLaunch>, ) -> Self { self.last_launch = input; self } /// Consumes the builder and constructs a [`LifeCycle`](crate::model::LifeCycle) pub fn build(self) -> crate::model::LifeCycle { crate::model::LifeCycle { added_to_service_date_time: self.added_to_service_date_time, first_byte_date_time: self.first_byte_date_time, elapsed_replication_duration: self.elapsed_replication_duration, last_seen_by_service_date_time: self.last_seen_by_service_date_time, last_launch: self.last_launch, } } } } impl LifeCycle { /// Creates a new builder-style object to manufacture [`LifeCycle`](crate::model::LifeCycle) pub fn builder() -> crate::model::life_cycle::Builder { crate::model::life_cycle::Builder::default() } } /// <p>An object containing information regarding the last launch of a Source Server.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct LifeCycleLastLaunch { /// <p>An object containing information regarding the initiation of the last launch of a Source Server.</p> pub initiated: std::option::Option<crate::model::LifeCycleLastLaunchInitiated>, } impl LifeCycleLastLaunch { /// <p>An object containing information regarding the initiation of the last launch of a Source Server.</p> pub fn initiated(&self) -> std::option::Option<&crate::model::LifeCycleLastLaunchInitiated> { self.initiated.as_ref() } } impl std::fmt::Debug for LifeCycleLastLaunch { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("LifeCycleLastLaunch"); formatter.field("initiated", &self.initiated); formatter.finish() } } /// See [`LifeCycleLastLaunch`](crate::model::LifeCycleLastLaunch) pub mod life_cycle_last_launch { /// A builder for [`LifeCycleLastLaunch`](crate::model::LifeCycleLastLaunch) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) initiated: std::option::Option<crate::model::LifeCycleLastLaunchInitiated>, } impl Builder { /// <p>An object containing information regarding the initiation of the last launch of a Source Server.</p> pub fn initiated(mut self, input: crate::model::LifeCycleLastLaunchInitiated) -> Self { self.initiated = Some(input); self } /// <p>An object containing information regarding the initiation of the last launch of a Source Server.</p> pub fn set_initiated( mut self, input: std::option::Option<crate::model::LifeCycleLastLaunchInitiated>, ) -> Self { self.initiated = input; self } /// Consumes the builder and constructs a [`LifeCycleLastLaunch`](crate::model::LifeCycleLastLaunch) pub fn build(self) -> crate::model::LifeCycleLastLaunch { crate::model::LifeCycleLastLaunch { initiated: self.initiated, } } } } impl LifeCycleLastLaunch { /// Creates a new builder-style object to manufacture [`LifeCycleLastLaunch`](crate::model::LifeCycleLastLaunch) pub fn builder() -> crate::model::life_cycle_last_launch::Builder { crate::model::life_cycle_last_launch::Builder::default() } } /// <p>An object containing information regarding the initiation of the last launch of a Source Server.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct LifeCycleLastLaunchInitiated { /// <p>The date and time the last Source Server launch was initiated.</p> pub 
api_call_date_time: std::option::Option<std::string::String>, /// <p>The ID of the Job that was used to last launch the Source Server.</p> pub job_id: std::option::Option<std::string::String>, /// <p>The Job type that was used to last launch the Source Server.</p> pub r#type: std::option::Option<crate::model::LastLaunchType>, } impl LifeCycleLastLaunchInitiated { /// <p>The date and time the last Source Server launch was initiated.</p> pub fn api_call_date_time(&self) -> std::option::Option<&str> { self.api_call_date_time.as_deref() } /// <p>The ID of the Job that was used to last launch the Source Server.</p> pub fn job_id(&self) -> std::option::Option<&str> { self.job_id.as_deref() } /// <p>The Job type that was used to last launch the Source Server.</p> pub fn r#type(&self) -> std::option::Option<&crate::model::LastLaunchType> { self.r#type.as_ref() } } impl std::fmt::Debug for LifeCycleLastLaunchInitiated { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("LifeCycleLastLaunchInitiated"); formatter.field("api_call_date_time", &self.api_call_date_time); formatter.field("job_id", &self.job_id); formatter.field("r#type", &self.r#type); formatter.finish() } } /// See [`LifeCycleLastLaunchInitiated`](crate::model::LifeCycleLastLaunchInitiated) pub mod life_cycle_last_launch_initiated { /// A builder for [`LifeCycleLastLaunchInitiated`](crate::model::LifeCycleLastLaunchInitiated) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) api_call_date_time: std::option::Option<std::string::String>, pub(crate) job_id: std::option::Option<std::string::String>, pub(crate) r#type: std::option::Option<crate::model::LastLaunchType>, } impl Builder { /// <p>The date and time the last Source Server launch was initiated.</p> pub fn api_call_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.api_call_date_time = Some(input.into()); self } /// <p>The date and time the last Source Server launch was initiated.</p> pub fn set_api_call_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.api_call_date_time = input; self } /// <p>The ID of the Job that was used to last launch the Source Server.</p> pub fn job_id(mut self, input: impl Into<std::string::String>) -> Self { self.job_id = Some(input.into()); self } /// <p>The ID of the Job that was used to last launch the Source Server.</p> pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.job_id = input; self } /// <p>The Job type that was used to last launch the Source Server.</p> pub fn r#type(mut self, input: crate::model::LastLaunchType) -> Self { self.r#type = Some(input); self } /// <p>The Job type that was used to last launch the Source Server.</p> pub fn set_type( mut self, input: std::option::Option<crate::model::LastLaunchType>, ) -> Self { self.r#type = input; self } /// Consumes the builder and constructs a [`LifeCycleLastLaunchInitiated`](crate::model::LifeCycleLastLaunchInitiated) pub fn build(self) -> crate::model::LifeCycleLastLaunchInitiated { crate::model::LifeCycleLastLaunchInitiated { api_call_date_time: self.api_call_date_time, job_id: self.job_id, r#type: self.r#type, } } } } impl LifeCycleLastLaunchInitiated { /// Creates a new builder-style object to manufacture [`LifeCycleLastLaunchInitiated`](crate::model::LifeCycleLastLaunchInitiated) pub fn builder() -> 
crate::model::life_cycle_last_launch_initiated::Builder { crate::model::life_cycle_last_launch_initiated::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum LastLaunchType { #[allow(missing_docs)] // documentation missing in model Drill, #[allow(missing_docs)] // documentation missing in model Recovery, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for LastLaunchType { fn from(s: &str) -> Self { match s { "DRILL" => LastLaunchType::Drill, "RECOVERY" => LastLaunchType::Recovery, other => LastLaunchType::Unknown(other.to_owned()), } } } impl std::str::FromStr for LastLaunchType { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(LastLaunchType::from(s)) } } impl LastLaunchType { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { LastLaunchType::Drill => "DRILL", LastLaunchType::Recovery => "RECOVERY", LastLaunchType::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["DRILL", "RECOVERY"] } } impl AsRef<str> for LastLaunchType { fn as_ref(&self) -> &str { self.as_str() } } /// <p>Information about Data Replication</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DataReplicationInfo { /// <p>Data replication lag duration.</p> pub lag_duration: std::option::Option<std::string::String>, /// <p>An estimate of when the data replication will be completed.</p> pub eta_date_time: std::option::Option<std::string::String>, /// <p>The disks that should be replicated.</p> pub replicated_disks: std::option::Option<std::vec::Vec<crate::model::DataReplicationInfoReplicatedDisk>>, /// <p>The state of the data replication.</p> pub data_replication_state: std::option::Option<crate::model::DataReplicationState>, /// <p>Information about whether the data replication has been initiated.</p> pub data_replication_initiation: std::option::Option<crate::model::DataReplicationInitiation>, /// <p>Error in data replication.</p> pub data_replication_error: std::option::Option<crate::model::DataReplicationError>, } impl DataReplicationInfo { /// <p>Data replication lag duration.</p> pub fn lag_duration(&self) -> std::option::Option<&str> { self.lag_duration.as_deref() } /// <p>An estimate of when the data replication will be completed.</p> pub fn eta_date_time(&self) -> std::option::Option<&str> { self.eta_date_time.as_deref() } /// <p>The disks that should be replicated.</p> pub fn replicated_disks( &self, ) -> std::option::Option<&[crate::model::DataReplicationInfoReplicatedDisk]> { self.replicated_disks.as_deref() } /// <p>The state of the data replication.</p> pub fn data_replication_state( &self, ) -> std::option::Option<&crate::model::DataReplicationState> { self.data_replication_state.as_ref() } /// <p>Information about whether the data replication has been initiated.</p> pub fn data_replication_initiation( &self, ) -> std::option::Option<&crate::model::DataReplicationInitiation> { self.data_replication_initiation.as_ref() } /// <p>Error in data replication.</p> pub fn data_replication_error( &self, ) -> std::option::Option<&crate::model::DataReplicationError> { self.data_replication_error.as_ref() } } impl std::fmt::Debug for 
DataReplicationInfo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DataReplicationInfo"); formatter.field("lag_duration", &self.lag_duration); formatter.field("eta_date_time", &self.eta_date_time); formatter.field("replicated_disks", &self.replicated_disks); formatter.field("data_replication_state", &self.data_replication_state); formatter.field( "data_replication_initiation", &self.data_replication_initiation, ); formatter.field("data_replication_error", &self.data_replication_error); formatter.finish() } } /// See [`DataReplicationInfo`](crate::model::DataReplicationInfo) pub mod data_replication_info { /// A builder for [`DataReplicationInfo`](crate::model::DataReplicationInfo) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) lag_duration: std::option::Option<std::string::String>, pub(crate) eta_date_time: std::option::Option<std::string::String>, pub(crate) replicated_disks: std::option::Option<std::vec::Vec<crate::model::DataReplicationInfoReplicatedDisk>>, pub(crate) data_replication_state: std::option::Option<crate::model::DataReplicationState>, pub(crate) data_replication_initiation: std::option::Option<crate::model::DataReplicationInitiation>, pub(crate) data_replication_error: std::option::Option<crate::model::DataReplicationError>, } impl Builder { /// <p>Data replication lag duration.</p> pub fn lag_duration(mut self, input: impl Into<std::string::String>) -> Self { self.lag_duration = Some(input.into()); self } /// <p>Data replication lag duration.</p> pub fn set_lag_duration(mut self, input: std::option::Option<std::string::String>) -> Self { self.lag_duration = input; self } /// <p>An estimate of when the data replication will be completed.</p> pub fn eta_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.eta_date_time = Some(input.into()); self } /// <p>An estimate of when the data replication will be completed.</p> pub fn set_eta_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.eta_date_time = input; self } /// Appends an item to `replicated_disks`. /// /// To override the contents of this collection use [`set_replicated_disks`](Self::set_replicated_disks). 
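// A minimal sketch of the collection appender below, assuming this crate's builders are
// in scope; the device name and byte count are hypothetical placeholder values:
//
//     let info = crate::model::DataReplicationInfo::builder()
//         .replicated_disks(
//             crate::model::DataReplicationInfoReplicatedDisk::builder()
//                 .device_name("/dev/xvda")
//                 .total_storage_bytes(8_589_934_592)
//                 .build(),
//         )
//         .build();
//     assert_eq!(info.replicated_disks().map(|d| d.len()), Some(1));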
/// /// <p>The disks that should be replicated.</p> pub fn replicated_disks( mut self, input: crate::model::DataReplicationInfoReplicatedDisk, ) -> Self { let mut v = self.replicated_disks.unwrap_or_default(); v.push(input); self.replicated_disks = Some(v); self } /// <p>The disks that should be replicated.</p> pub fn set_replicated_disks( mut self, input: std::option::Option< std::vec::Vec<crate::model::DataReplicationInfoReplicatedDisk>, >, ) -> Self { self.replicated_disks = input; self } /// <p>The state of the data replication.</p> pub fn data_replication_state(mut self, input: crate::model::DataReplicationState) -> Self { self.data_replication_state = Some(input); self } /// <p>The state of the data replication.</p> pub fn set_data_replication_state( mut self, input: std::option::Option<crate::model::DataReplicationState>, ) -> Self { self.data_replication_state = input; self } /// <p>Information about whether the data replication has been initiated.</p> pub fn data_replication_initiation( mut self, input: crate::model::DataReplicationInitiation, ) -> Self { self.data_replication_initiation = Some(input); self } /// <p>Information about whether the data replication has been initiated.</p> pub fn set_data_replication_initiation( mut self, input: std::option::Option<crate::model::DataReplicationInitiation>, ) -> Self { self.data_replication_initiation = input; self } /// <p>Error in data replication.</p> pub fn data_replication_error(mut self, input: crate::model::DataReplicationError) -> Self { self.data_replication_error = Some(input); self } /// <p>Error in data replication.</p> pub fn set_data_replication_error( mut self, input: std::option::Option<crate::model::DataReplicationError>, ) -> Self { self.data_replication_error = input; self } /// Consumes the builder and constructs a [`DataReplicationInfo`](crate::model::DataReplicationInfo) pub fn build(self) -> crate::model::DataReplicationInfo { crate::model::DataReplicationInfo { lag_duration: self.lag_duration, eta_date_time: self.eta_date_time, replicated_disks: self.replicated_disks, data_replication_state: self.data_replication_state, data_replication_initiation: self.data_replication_initiation, data_replication_error: self.data_replication_error, } } } } impl DataReplicationInfo { /// Creates a new builder-style object to manufacture [`DataReplicationInfo`](crate::model::DataReplicationInfo) pub fn builder() -> crate::model::data_replication_info::Builder { crate::model::data_replication_info::Builder::default() } } /// <p>Error in data replication.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DataReplicationError { /// <p>Error in data replication.</p> pub error: std::option::Option<crate::model::DataReplicationErrorString>, /// <p>Error in data replication.</p> pub raw_error: std::option::Option<std::string::String>, } impl DataReplicationError { /// <p>Error in data replication.</p> pub fn error(&self) -> std::option::Option<&crate::model::DataReplicationErrorString> { self.error.as_ref() } /// <p>Error in data replication.</p> pub fn raw_error(&self) -> std::option::Option<&str> { self.raw_error.as_deref() } } impl std::fmt::Debug for DataReplicationError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DataReplicationError"); formatter.field("error", &self.error); formatter.field("raw_error", &self.raw_error); formatter.finish() } } /// See [`DataReplicationError`](crate::model::DataReplicationError) pub mod 
data_replication_error { /// A builder for [`DataReplicationError`](crate::model::DataReplicationError) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) error: std::option::Option<crate::model::DataReplicationErrorString>, pub(crate) raw_error: std::option::Option<std::string::String>, } impl Builder { /// <p>Error in data replication.</p> pub fn error(mut self, input: crate::model::DataReplicationErrorString) -> Self { self.error = Some(input); self } /// <p>Error in data replication.</p> pub fn set_error( mut self, input: std::option::Option<crate::model::DataReplicationErrorString>, ) -> Self { self.error = input; self } /// <p>Error in data replication.</p> pub fn raw_error(mut self, input: impl Into<std::string::String>) -> Self { self.raw_error = Some(input.into()); self } /// <p>Error in data replication.</p> pub fn set_raw_error(mut self, input: std::option::Option<std::string::String>) -> Self { self.raw_error = input; self } /// Consumes the builder and constructs a [`DataReplicationError`](crate::model::DataReplicationError) pub fn build(self) -> crate::model::DataReplicationError { crate::model::DataReplicationError { error: self.error, raw_error: self.raw_error, } } } } impl DataReplicationError { /// Creates a new builder-style object to manufacture [`DataReplicationError`](crate::model::DataReplicationError) pub fn builder() -> crate::model::data_replication_error::Builder { crate::model::data_replication_error::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum DataReplicationErrorString { #[allow(missing_docs)] // documentation missing in model AgentNotSeen, #[allow(missing_docs)] // documentation missing in model FailedToAttachStagingDisks, #[allow(missing_docs)] // documentation missing in model FailedToAuthenticateWithService, #[allow(missing_docs)] // documentation missing in model FailedToBootReplicationServer, #[allow(missing_docs)] // documentation missing in model FailedToConnectAgentToReplicationServer, #[allow(missing_docs)] // documentation missing in model FailedToCreateSecurityGroup, #[allow(missing_docs)] // documentation missing in model FailedToCreateStagingDisks, #[allow(missing_docs)] // documentation missing in model FailedToDownloadReplicationSoftware, #[allow(missing_docs)] // documentation missing in model FailedToLaunchReplicationServer, #[allow(missing_docs)] // documentation missing in model FailedToPairReplicationServerWithAgent, #[allow(missing_docs)] // documentation missing in model FailedToStartDataTransfer, #[allow(missing_docs)] // documentation missing in model NotConverging, #[allow(missing_docs)] // documentation missing in model SnapshotsFailure, #[allow(missing_docs)] // documentation missing in model UnstableNetwork, /// Unknown contains new variants that have been added since this code was generated. 
Unknown(String), } impl std::convert::From<&str> for DataReplicationErrorString { fn from(s: &str) -> Self { match s { "AGENT_NOT_SEEN" => DataReplicationErrorString::AgentNotSeen, "FAILED_TO_ATTACH_STAGING_DISKS" => { DataReplicationErrorString::FailedToAttachStagingDisks } "FAILED_TO_AUTHENTICATE_WITH_SERVICE" => { DataReplicationErrorString::FailedToAuthenticateWithService } "FAILED_TO_BOOT_REPLICATION_SERVER" => { DataReplicationErrorString::FailedToBootReplicationServer } "FAILED_TO_CONNECT_AGENT_TO_REPLICATION_SERVER" => { DataReplicationErrorString::FailedToConnectAgentToReplicationServer } "FAILED_TO_CREATE_SECURITY_GROUP" => { DataReplicationErrorString::FailedToCreateSecurityGroup } "FAILED_TO_CREATE_STAGING_DISKS" => { DataReplicationErrorString::FailedToCreateStagingDisks } "FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE" => { DataReplicationErrorString::FailedToDownloadReplicationSoftware } "FAILED_TO_LAUNCH_REPLICATION_SERVER" => { DataReplicationErrorString::FailedToLaunchReplicationServer } "FAILED_TO_PAIR_REPLICATION_SERVER_WITH_AGENT" => { DataReplicationErrorString::FailedToPairReplicationServerWithAgent } "FAILED_TO_START_DATA_TRANSFER" => { DataReplicationErrorString::FailedToStartDataTransfer } "NOT_CONVERGING" => DataReplicationErrorString::NotConverging, "SNAPSHOTS_FAILURE" => DataReplicationErrorString::SnapshotsFailure, "UNSTABLE_NETWORK" => DataReplicationErrorString::UnstableNetwork, other => DataReplicationErrorString::Unknown(other.to_owned()), } } } impl std::str::FromStr for DataReplicationErrorString { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(DataReplicationErrorString::from(s)) } } impl DataReplicationErrorString { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { DataReplicationErrorString::AgentNotSeen => "AGENT_NOT_SEEN", DataReplicationErrorString::FailedToAttachStagingDisks => { "FAILED_TO_ATTACH_STAGING_DISKS" } DataReplicationErrorString::FailedToAuthenticateWithService => { "FAILED_TO_AUTHENTICATE_WITH_SERVICE" } DataReplicationErrorString::FailedToBootReplicationServer => { "FAILED_TO_BOOT_REPLICATION_SERVER" } DataReplicationErrorString::FailedToConnectAgentToReplicationServer => { "FAILED_TO_CONNECT_AGENT_TO_REPLICATION_SERVER" } DataReplicationErrorString::FailedToCreateSecurityGroup => { "FAILED_TO_CREATE_SECURITY_GROUP" } DataReplicationErrorString::FailedToCreateStagingDisks => { "FAILED_TO_CREATE_STAGING_DISKS" } DataReplicationErrorString::FailedToDownloadReplicationSoftware => { "FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE" } DataReplicationErrorString::FailedToLaunchReplicationServer => { "FAILED_TO_LAUNCH_REPLICATION_SERVER" } DataReplicationErrorString::FailedToPairReplicationServerWithAgent => { "FAILED_TO_PAIR_REPLICATION_SERVER_WITH_AGENT" } DataReplicationErrorString::FailedToStartDataTransfer => { "FAILED_TO_START_DATA_TRANSFER" } DataReplicationErrorString::NotConverging => "NOT_CONVERGING", DataReplicationErrorString::SnapshotsFailure => "SNAPSHOTS_FAILURE", DataReplicationErrorString::UnstableNetwork => "UNSTABLE_NETWORK", DataReplicationErrorString::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
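// A minimal sketch of the string round-trip these conversions provide, assuming this
// crate is in scope; `"SOME_FUTURE_ERROR"` is a hypothetical value that postdates codegen:
//
//     use std::str::FromStr;
//     let known = crate::model::DataReplicationErrorString::from_str("AGENT_NOT_SEEN").unwrap();
//     assert_eq!(known.as_str(), "AGENT_NOT_SEEN");
//     // Values added to the service after codegen are preserved via `Unknown`:
//     let newer = crate::model::DataReplicationErrorString::from("SOME_FUTURE_ERROR");
//     assert_eq!(newer.as_str(), "SOME_FUTURE_ERROR");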
pub fn values() -> &'static [&'static str] { &[ "AGENT_NOT_SEEN", "FAILED_TO_ATTACH_STAGING_DISKS", "FAILED_TO_AUTHENTICATE_WITH_SERVICE", "FAILED_TO_BOOT_REPLICATION_SERVER", "FAILED_TO_CONNECT_AGENT_TO_REPLICATION_SERVER", "FAILED_TO_CREATE_SECURITY_GROUP", "FAILED_TO_CREATE_STAGING_DISKS", "FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE", "FAILED_TO_LAUNCH_REPLICATION_SERVER", "FAILED_TO_PAIR_REPLICATION_SERVER_WITH_AGENT", "FAILED_TO_START_DATA_TRANSFER", "NOT_CONVERGING", "SNAPSHOTS_FAILURE", "UNSTABLE_NETWORK", ] } } impl AsRef<str> for DataReplicationErrorString { fn as_ref(&self) -> &str { self.as_str() } } /// <p>Data replication initiation.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DataReplicationInitiation { /// <p>The date and time of the current attempt to initiate data replication.</p> pub start_date_time: std::option::Option<std::string::String>, /// <p>The date and time of the next attempt to initiate data replication.</p> pub next_attempt_date_time: std::option::Option<std::string::String>, /// <p>The steps of the current attempt to initiate data replication.</p> pub steps: std::option::Option<std::vec::Vec<crate::model::DataReplicationInitiationStep>>, } impl DataReplicationInitiation { /// <p>The date and time of the current attempt to initiate data replication.</p> pub fn start_date_time(&self) -> std::option::Option<&str> { self.start_date_time.as_deref() } /// <p>The date and time of the next attempt to initiate data replication.</p> pub fn next_attempt_date_time(&self) -> std::option::Option<&str> { self.next_attempt_date_time.as_deref() } /// <p>The steps of the current attempt to initiate data replication.</p> pub fn steps(&self) -> std::option::Option<&[crate::model::DataReplicationInitiationStep]> { self.steps.as_deref() } } impl std::fmt::Debug for DataReplicationInitiation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DataReplicationInitiation"); formatter.field("start_date_time", &self.start_date_time); formatter.field("next_attempt_date_time", &self.next_attempt_date_time); formatter.field("steps", &self.steps); formatter.finish() } } /// See [`DataReplicationInitiation`](crate::model::DataReplicationInitiation) pub mod data_replication_initiation { /// A builder for [`DataReplicationInitiation`](crate::model::DataReplicationInitiation) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) start_date_time: std::option::Option<std::string::String>, pub(crate) next_attempt_date_time: std::option::Option<std::string::String>, pub(crate) steps: std::option::Option<std::vec::Vec<crate::model::DataReplicationInitiationStep>>, } impl Builder { /// <p>The date and time of the current attempt to initiate data replication.</p> pub fn start_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.start_date_time = Some(input.into()); self } /// <p>The date and time of the current attempt to initiate data replication.</p> pub fn set_start_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.start_date_time = input; self } /// <p>The date and time of the next attempt to initiate data replication.</p> pub fn next_attempt_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.next_attempt_date_time = Some(input.into()); self } /// <p>The date and time of the next attempt to initiate data replication.</p> pub fn 
set_next_attempt_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.next_attempt_date_time = input; self } /// Appends an item to `steps`. /// /// To override the contents of this collection use [`set_steps`](Self::set_steps). /// /// <p>The steps of the current attempt to initiate data replication.</p> pub fn steps(mut self, input: crate::model::DataReplicationInitiationStep) -> Self { let mut v = self.steps.unwrap_or_default(); v.push(input); self.steps = Some(v); self } /// <p>The steps of the current attempt to initiate data replication.</p> pub fn set_steps( mut self, input: std::option::Option<std::vec::Vec<crate::model::DataReplicationInitiationStep>>, ) -> Self { self.steps = input; self } /// Consumes the builder and constructs a [`DataReplicationInitiation`](crate::model::DataReplicationInitiation) pub fn build(self) -> crate::model::DataReplicationInitiation { crate::model::DataReplicationInitiation { start_date_time: self.start_date_time, next_attempt_date_time: self.next_attempt_date_time, steps: self.steps, } } } } impl DataReplicationInitiation { /// Creates a new builder-style object to manufacture [`DataReplicationInitiation`](crate::model::DataReplicationInitiation) pub fn builder() -> crate::model::data_replication_initiation::Builder { crate::model::data_replication_initiation::Builder::default() } } /// <p>Data replication initiation step.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DataReplicationInitiationStep { /// <p>The name of the step.</p> pub name: std::option::Option<crate::model::DataReplicationInitiationStepName>, /// <p>The status of the step.</p> pub status: std::option::Option<crate::model::DataReplicationInitiationStepStatus>, } impl DataReplicationInitiationStep { /// <p>The name of the step.</p> pub fn name(&self) -> std::option::Option<&crate::model::DataReplicationInitiationStepName> { self.name.as_ref() } /// <p>The status of the step.</p> pub fn status( &self, ) -> std::option::Option<&crate::model::DataReplicationInitiationStepStatus> { self.status.as_ref() } } impl std::fmt::Debug for DataReplicationInitiationStep { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DataReplicationInitiationStep"); formatter.field("name", &self.name); formatter.field("status", &self.status); formatter.finish() } } /// See [`DataReplicationInitiationStep`](crate::model::DataReplicationInitiationStep) pub mod data_replication_initiation_step { /// A builder for [`DataReplicationInitiationStep`](crate::model::DataReplicationInitiationStep) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<crate::model::DataReplicationInitiationStepName>, pub(crate) status: std::option::Option<crate::model::DataReplicationInitiationStepStatus>, } impl Builder { /// <p>The name of the step.</p> pub fn name(mut self, input: crate::model::DataReplicationInitiationStepName) -> Self { self.name = Some(input); self } /// <p>The name of the step.</p> pub fn set_name( mut self, input: std::option::Option<crate::model::DataReplicationInitiationStepName>, ) -> Self { self.name = input; self } /// <p>The status of the step.</p> pub fn status(mut self, input: crate::model::DataReplicationInitiationStepStatus) -> Self { self.status = Some(input); self } /// <p>The status of the step.</p> pub fn set_status( mut self, input: 
std::option::Option<crate::model::DataReplicationInitiationStepStatus>, ) -> Self { self.status = input; self } /// Consumes the builder and constructs a [`DataReplicationInitiationStep`](crate::model::DataReplicationInitiationStep) pub fn build(self) -> crate::model::DataReplicationInitiationStep { crate::model::DataReplicationInitiationStep { name: self.name, status: self.status, } } } } impl DataReplicationInitiationStep { /// Creates a new builder-style object to manufacture [`DataReplicationInitiationStep`](crate::model::DataReplicationInitiationStep) pub fn builder() -> crate::model::data_replication_initiation_step::Builder { crate::model::data_replication_initiation_step::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum DataReplicationInitiationStepStatus { #[allow(missing_docs)] // documentation missing in model Failed, #[allow(missing_docs)] // documentation missing in model InProgress, #[allow(missing_docs)] // documentation missing in model NotStarted, #[allow(missing_docs)] // documentation missing in model Skipped, #[allow(missing_docs)] // documentation missing in model Succeeded, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for DataReplicationInitiationStepStatus { fn from(s: &str) -> Self { match s { "FAILED" => DataReplicationInitiationStepStatus::Failed, "IN_PROGRESS" => DataReplicationInitiationStepStatus::InProgress, "NOT_STARTED" => DataReplicationInitiationStepStatus::NotStarted, "SKIPPED" => DataReplicationInitiationStepStatus::Skipped, "SUCCEEDED" => DataReplicationInitiationStepStatus::Succeeded, other => DataReplicationInitiationStepStatus::Unknown(other.to_owned()), } } } impl std::str::FromStr for DataReplicationInitiationStepStatus { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(DataReplicationInitiationStepStatus::from(s)) } } impl DataReplicationInitiationStepStatus { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { DataReplicationInitiationStepStatus::Failed => "FAILED", DataReplicationInitiationStepStatus::InProgress => "IN_PROGRESS", DataReplicationInitiationStepStatus::NotStarted => "NOT_STARTED", DataReplicationInitiationStepStatus::Skipped => "SKIPPED", DataReplicationInitiationStepStatus::Succeeded => "SUCCEEDED", DataReplicationInitiationStepStatus::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
pub fn values() -> &'static [&'static str] { &[ "FAILED", "IN_PROGRESS", "NOT_STARTED", "SKIPPED", "SUCCEEDED", ] } } impl AsRef<str> for DataReplicationInitiationStepStatus { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum DataReplicationInitiationStepName { #[allow(missing_docs)] // documentation missing in model AttachStagingDisks, #[allow(missing_docs)] // documentation missing in model AuthenticateWithService, #[allow(missing_docs)] // documentation missing in model BootReplicationServer, #[allow(missing_docs)] // documentation missing in model ConnectAgentToReplicationServer, #[allow(missing_docs)] // documentation missing in model CreateSecurityGroup, #[allow(missing_docs)] // documentation missing in model CreateStagingDisks, #[allow(missing_docs)] // documentation missing in model DownloadReplicationSoftware, #[allow(missing_docs)] // documentation missing in model LaunchReplicationServer, #[allow(missing_docs)] // documentation missing in model PairReplicationServerWithAgent, #[allow(missing_docs)] // documentation missing in model StartDataTransfer, #[allow(missing_docs)] // documentation missing in model Wait, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for DataReplicationInitiationStepName { fn from(s: &str) -> Self { match s { "ATTACH_STAGING_DISKS" => DataReplicationInitiationStepName::AttachStagingDisks, "AUTHENTICATE_WITH_SERVICE" => { DataReplicationInitiationStepName::AuthenticateWithService } "BOOT_REPLICATION_SERVER" => DataReplicationInitiationStepName::BootReplicationServer, "CONNECT_AGENT_TO_REPLICATION_SERVER" => { DataReplicationInitiationStepName::ConnectAgentToReplicationServer } "CREATE_SECURITY_GROUP" => DataReplicationInitiationStepName::CreateSecurityGroup, "CREATE_STAGING_DISKS" => DataReplicationInitiationStepName::CreateStagingDisks, "DOWNLOAD_REPLICATION_SOFTWARE" => { DataReplicationInitiationStepName::DownloadReplicationSoftware } "LAUNCH_REPLICATION_SERVER" => { DataReplicationInitiationStepName::LaunchReplicationServer } "PAIR_REPLICATION_SERVER_WITH_AGENT" => { DataReplicationInitiationStepName::PairReplicationServerWithAgent } "START_DATA_TRANSFER" => DataReplicationInitiationStepName::StartDataTransfer, "WAIT" => DataReplicationInitiationStepName::Wait, other => DataReplicationInitiationStepName::Unknown(other.to_owned()), } } } impl std::str::FromStr for DataReplicationInitiationStepName { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(DataReplicationInitiationStepName::from(s)) } } impl DataReplicationInitiationStepName { /// Returns the `&str` value of the enum member. 
pub fn as_str(&self) -> &str { match self { DataReplicationInitiationStepName::AttachStagingDisks => "ATTACH_STAGING_DISKS", DataReplicationInitiationStepName::AuthenticateWithService => { "AUTHENTICATE_WITH_SERVICE" } DataReplicationInitiationStepName::BootReplicationServer => "BOOT_REPLICATION_SERVER", DataReplicationInitiationStepName::ConnectAgentToReplicationServer => { "CONNECT_AGENT_TO_REPLICATION_SERVER" } DataReplicationInitiationStepName::CreateSecurityGroup => "CREATE_SECURITY_GROUP", DataReplicationInitiationStepName::CreateStagingDisks => "CREATE_STAGING_DISKS", DataReplicationInitiationStepName::DownloadReplicationSoftware => { "DOWNLOAD_REPLICATION_SOFTWARE" } DataReplicationInitiationStepName::LaunchReplicationServer => { "LAUNCH_REPLICATION_SERVER" } DataReplicationInitiationStepName::PairReplicationServerWithAgent => { "PAIR_REPLICATION_SERVER_WITH_AGENT" } DataReplicationInitiationStepName::StartDataTransfer => "START_DATA_TRANSFER", DataReplicationInitiationStepName::Wait => "WAIT", DataReplicationInitiationStepName::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &[ "ATTACH_STAGING_DISKS", "AUTHENTICATE_WITH_SERVICE", "BOOT_REPLICATION_SERVER", "CONNECT_AGENT_TO_REPLICATION_SERVER", "CREATE_SECURITY_GROUP", "CREATE_STAGING_DISKS", "DOWNLOAD_REPLICATION_SOFTWARE", "LAUNCH_REPLICATION_SERVER", "PAIR_REPLICATION_SERVER_WITH_AGENT", "START_DATA_TRANSFER", "WAIT", ] } } impl AsRef<str> for DataReplicationInitiationStepName { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum DataReplicationState { #[allow(missing_docs)] // documentation missing in model Backlog, #[allow(missing_docs)] // documentation missing in model Continuous, #[allow(missing_docs)] // documentation missing in model CreatingSnapshot, #[allow(missing_docs)] // documentation missing in model Disconnected, #[allow(missing_docs)] // documentation missing in model InitialSync, #[allow(missing_docs)] // documentation missing in model Initiating, #[allow(missing_docs)] // documentation missing in model Paused, #[allow(missing_docs)] // documentation missing in model Rescan, #[allow(missing_docs)] // documentation missing in model Stalled, #[allow(missing_docs)] // documentation missing in model Stopped, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for DataReplicationState { fn from(s: &str) -> Self { match s { "BACKLOG" => DataReplicationState::Backlog, "CONTINUOUS" => DataReplicationState::Continuous, "CREATING_SNAPSHOT" => DataReplicationState::CreatingSnapshot, "DISCONNECTED" => DataReplicationState::Disconnected, "INITIAL_SYNC" => DataReplicationState::InitialSync, "INITIATING" => DataReplicationState::Initiating, "PAUSED" => DataReplicationState::Paused, "RESCAN" => DataReplicationState::Rescan, "STALLED" => DataReplicationState::Stalled, "STOPPED" => DataReplicationState::Stopped, other => DataReplicationState::Unknown(other.to_owned()), } } } impl std::str::FromStr for DataReplicationState { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(DataReplicationState::from(s)) } } impl DataReplicationState { /// Returns the `&str` value of the enum member. 
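// A minimal sketch of consuming this state enum, assuming the crate is in scope;
// `is_steady_state` is a hypothetical helper, and the `matches!` stays total as new
// variants arrive because everything else falls through to `false`:
//
//     fn is_steady_state(state: &crate::model::DataReplicationState) -> bool {
//         matches!(
//             state,
//             crate::model::DataReplicationState::Continuous
//                 | crate::model::DataReplicationState::CreatingSnapshot
//         )
//     }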
pub fn as_str(&self) -> &str { match self { DataReplicationState::Backlog => "BACKLOG", DataReplicationState::Continuous => "CONTINUOUS", DataReplicationState::CreatingSnapshot => "CREATING_SNAPSHOT", DataReplicationState::Disconnected => "DISCONNECTED", DataReplicationState::InitialSync => "INITIAL_SYNC", DataReplicationState::Initiating => "INITIATING", DataReplicationState::Paused => "PAUSED", DataReplicationState::Rescan => "RESCAN", DataReplicationState::Stalled => "STALLED", DataReplicationState::Stopped => "STOPPED", DataReplicationState::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &[ "BACKLOG", "CONTINUOUS", "CREATING_SNAPSHOT", "DISCONNECTED", "INITIAL_SYNC", "INITIATING", "PAUSED", "RESCAN", "STALLED", "STOPPED", ] } } impl AsRef<str> for DataReplicationState { fn as_ref(&self) -> &str { self.as_str() } } /// <p>A disk that should be replicated.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DataReplicationInfoReplicatedDisk { /// <p>The name of the device.</p> pub device_name: std::option::Option<std::string::String>, /// <p>The total amount of data to be replicated in bytes.</p> pub total_storage_bytes: i64, /// <p>The amount of data replicated so far in bytes.</p> pub replicated_storage_bytes: i64, /// <p>The amount of data to be rescanned in bytes.</p> pub rescanned_storage_bytes: i64, /// <p>The size of the replication backlog in bytes.</p> pub backlogged_storage_bytes: i64, } impl DataReplicationInfoReplicatedDisk { /// <p>The name of the device.</p> pub fn device_name(&self) -> std::option::Option<&str> { self.device_name.as_deref() } /// <p>The total amount of data to be replicated in bytes.</p> pub fn total_storage_bytes(&self) -> i64 { self.total_storage_bytes } /// <p>The amount of data replicated so far in bytes.</p> pub fn replicated_storage_bytes(&self) -> i64 { self.replicated_storage_bytes } /// <p>The amount of data to be rescanned in bytes.</p> pub fn rescanned_storage_bytes(&self) -> i64 { self.rescanned_storage_bytes } /// <p>The size of the replication backlog in bytes.</p> pub fn backlogged_storage_bytes(&self) -> i64 { self.backlogged_storage_bytes } } impl std::fmt::Debug for DataReplicationInfoReplicatedDisk { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DataReplicationInfoReplicatedDisk"); formatter.field("device_name", &self.device_name); formatter.field("total_storage_bytes", &self.total_storage_bytes); formatter.field("replicated_storage_bytes", &self.replicated_storage_bytes); formatter.field("rescanned_storage_bytes", &self.rescanned_storage_bytes); formatter.field("backlogged_storage_bytes", &self.backlogged_storage_bytes); formatter.finish() } } /// See [`DataReplicationInfoReplicatedDisk`](crate::model::DataReplicationInfoReplicatedDisk) pub mod data_replication_info_replicated_disk { /// A builder for [`DataReplicationInfoReplicatedDisk`](crate::model::DataReplicationInfoReplicatedDisk) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) device_name: std::option::Option<std::string::String>, pub(crate) total_storage_bytes: std::option::Option<i64>, pub(crate) replicated_storage_bytes: std::option::Option<i64>, pub(crate) rescanned_storage_bytes: std::option::Option<i64>, pub(crate) backlogged_storage_bytes: std::option::Option<i64>, } impl Builder { /// <p>The name of the 
device.</p> pub fn device_name(mut self, input: impl Into<std::string::String>) -> Self { self.device_name = Some(input.into()); self } /// <p>The name of the device.</p> pub fn set_device_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.device_name = input; self } /// <p>The total amount of data to be replicated in bytes.</p> pub fn total_storage_bytes(mut self, input: i64) -> Self { self.total_storage_bytes = Some(input); self } /// <p>The total amount of data to be replicated in bytes.</p> pub fn set_total_storage_bytes(mut self, input: std::option::Option<i64>) -> Self { self.total_storage_bytes = input; self } /// <p>The amount of data replicated so far in bytes.</p> pub fn replicated_storage_bytes(mut self, input: i64) -> Self { self.replicated_storage_bytes = Some(input); self } /// <p>The amount of data replicated so far in bytes.</p> pub fn set_replicated_storage_bytes(mut self, input: std::option::Option<i64>) -> Self { self.replicated_storage_bytes = input; self } /// <p>The amount of data to be rescanned in bytes.</p> pub fn rescanned_storage_bytes(mut self, input: i64) -> Self { self.rescanned_storage_bytes = Some(input); self } /// <p>The amount of data to be rescanned in bytes.</p> pub fn set_rescanned_storage_bytes(mut self, input: std::option::Option<i64>) -> Self { self.rescanned_storage_bytes = input; self } /// <p>The size of the replication backlog in bytes.</p> pub fn backlogged_storage_bytes(mut self, input: i64) -> Self { self.backlogged_storage_bytes = Some(input); self } /// <p>The size of the replication backlog in bytes.</p> pub fn set_backlogged_storage_bytes(mut self, input: std::option::Option<i64>) -> Self { self.backlogged_storage_bytes = input; self } /// Consumes the builder and constructs a [`DataReplicationInfoReplicatedDisk`](crate::model::DataReplicationInfoReplicatedDisk) pub fn build(self) -> crate::model::DataReplicationInfoReplicatedDisk { crate::model::DataReplicationInfoReplicatedDisk { device_name: self.device_name, total_storage_bytes: self.total_storage_bytes.unwrap_or_default(), replicated_storage_bytes: self.replicated_storage_bytes.unwrap_or_default(), rescanned_storage_bytes: self.rescanned_storage_bytes.unwrap_or_default(), backlogged_storage_bytes: self.backlogged_storage_bytes.unwrap_or_default(), } } } } impl DataReplicationInfoReplicatedDisk { /// Creates a new builder-style object to manufacture [`DataReplicationInfoReplicatedDisk`](crate::model::DataReplicationInfoReplicatedDisk) pub fn builder() -> crate::model::data_replication_info_replicated_disk::Builder { crate::model::data_replication_info_replicated_disk::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum LastLaunchResult { #[allow(missing_docs)] // documentation missing in model Failed, #[allow(missing_docs)] // documentation missing in model NotStarted, #[allow(missing_docs)] // documentation missing in model Pending, #[allow(missing_docs)] // documentation missing in model Succeeded, /// Unknown contains new variants that have been added since this code was generated. 
Unknown(String), } impl std::convert::From<&str> for LastLaunchResult { fn from(s: &str) -> Self { match s { "FAILED" => LastLaunchResult::Failed, "NOT_STARTED" => LastLaunchResult::NotStarted, "PENDING" => LastLaunchResult::Pending, "SUCCEEDED" => LastLaunchResult::Succeeded, other => LastLaunchResult::Unknown(other.to_owned()), } } } impl std::str::FromStr for LastLaunchResult { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(LastLaunchResult::from(s)) } } impl LastLaunchResult { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { LastLaunchResult::Failed => "FAILED", LastLaunchResult::NotStarted => "NOT_STARTED", LastLaunchResult::Pending => "PENDING", LastLaunchResult::Succeeded => "SUCCEEDED", LastLaunchResult::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["FAILED", "NOT_STARTED", "PENDING", "SUCCEEDED"] } } impl AsRef<str> for LastLaunchResult { fn as_ref(&self) -> &str { self.as_str() } } /// <p>A snapshot of a Source Server used during recovery.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoverySnapshot { /// <p>The ID of the Recovery Snapshot.</p> pub snapshot_id: std::option::Option<std::string::String>, /// <p>The ID of the Source Server that the snapshot was taken for.</p> pub source_server_id: std::option::Option<std::string::String>, /// <p>The timestamp of when we expect the snapshot to be taken.</p> pub expected_timestamp: std::option::Option<std::string::String>, /// <p>The actual timestamp that the snapshot was taken.</p> pub timestamp: std::option::Option<std::string::String>, /// <p>A list of EBS snapshots.</p> pub ebs_snapshots: std::option::Option<std::vec::Vec<std::string::String>>, } impl RecoverySnapshot { /// <p>The ID of the Recovery Snapshot.</p> pub fn snapshot_id(&self) -> std::option::Option<&str> { self.snapshot_id.as_deref() } /// <p>The ID of the Source Server that the snapshot was taken for.</p> pub fn source_server_id(&self) -> std::option::Option<&str> { self.source_server_id.as_deref() } /// <p>The timestamp of when we expect the snapshot to be taken.</p> pub fn
expected_timestamp
(&self) -> std::option::Option<&str> { self.expected_timestamp.as_deref() } /// <p>The actual timestamp that the snapshot was taken.</p> pub fn timestamp(&self) -> std::option::Option<&str> { self.timestamp.as_deref() } /// <p>A list of EBS snapshots.</p> pub fn ebs_snapshots(&self) -> std::option::Option<&[std::string::String]> { self.ebs_snapshots.as_deref() } } impl std::fmt::Debug for RecoverySnapshot { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoverySnapshot"); formatter.field("snapshot_id", &self.snapshot_id); formatter.field("source_server_id", &self.source_server_id); formatter.field("expected_timestamp", &self.expected_timestamp); formatter.field("timestamp", &self.timestamp); formatter.field("ebs_snapshots", &self.ebs_snapshots); formatter.finish() } } /// See [`RecoverySnapshot`](crate::model::RecoverySnapshot) pub mod recovery_snapshot { /// A builder for [`RecoverySnapshot`](crate::model::RecoverySnapshot) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) snapshot_id: std::option::Option<std::string::String>, pub(crate) source_server_id: std::option::Option<std::string::String>, pub(crate) expected_timestamp: std::option::Option<std::string::String>, pub(crate) timestamp: std::option::Option<std::string::String>, pub(crate) ebs_snapshots: std::option::Option<std::vec::Vec<std::string::String>>, } impl Builder { /// <p>The ID of the Recovery Snapshot.</p> pub fn snapshot_id(mut self, input: impl Into<std::string::String>) -> Self { self.snapshot_id = Some(input.into()); self } /// <p>The ID of the Recovery Snapshot.</p> pub fn set_snapshot_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.snapshot_id = input; self } /// <p>The ID of the Source Server that the snapshot was taken for.</p> pub fn source_server_id(mut self, input: impl Into<std::string::String>) -> Self { self.source_server_id = Some(input.into()); self } /// <p>The ID of the Source Server that the snapshot was taken for.</p> pub fn set_source_server_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.source_server_id = input; self } /// <p>The timestamp of when we expect the snapshot to be taken.</p> pub fn expected_timestamp(mut self, input: impl Into<std::string::String>) -> Self { self.expected_timestamp = Some(input.into()); self } /// <p>The timestamp of when we expect the snapshot to be taken.</p> pub fn set_expected_timestamp( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.expected_timestamp = input; self } /// <p>The actual timestamp that the snapshot was taken.</p> pub fn timestamp(mut self, input: impl Into<std::string::String>) -> Self { self.timestamp = Some(input.into()); self } /// <p>The actual timestamp that the snapshot was taken.</p> pub fn set_timestamp(mut self, input: std::option::Option<std::string::String>) -> Self { self.timestamp = input; self } /// Appends an item to `ebs_snapshots`. /// /// To override the contents of this collection use [`set_ebs_snapshots`](Self::set_ebs_snapshots). 
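// A minimal sketch of the appender below, assuming this crate is in scope; all IDs are
// hypothetical placeholders:
//
//     let snapshot = crate::model::RecoverySnapshot::builder()
//         .snapshot_id("pit-1111111111111111")
//         .source_server_id("s-1111111111111111")
//         .ebs_snapshots("snap-1111111111111111")
//         .ebs_snapshots("snap-2222222222222222")
//         .build();
//     assert_eq!(snapshot.ebs_snapshots().map(|s| s.len()), Some(2));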
/// /// <p>A list of EBS snapshots.</p> pub fn ebs_snapshots(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.ebs_snapshots.unwrap_or_default(); v.push(input.into()); self.ebs_snapshots = Some(v); self } /// <p>A list of EBS snapshots.</p> pub fn set_ebs_snapshots( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.ebs_snapshots = input; self } /// Consumes the builder and constructs a [`RecoverySnapshot`](crate::model::RecoverySnapshot) pub fn build(self) -> crate::model::RecoverySnapshot { crate::model::RecoverySnapshot { snapshot_id: self.snapshot_id, source_server_id: self.source_server_id, expected_timestamp: self.expected_timestamp, timestamp: self.timestamp, ebs_snapshots: self.ebs_snapshots, } } } } impl RecoverySnapshot { /// Creates a new builder-style object to manufacture [`RecoverySnapshot`](crate::model::RecoverySnapshot) pub fn builder() -> crate::model::recovery_snapshot::Builder { crate::model::recovery_snapshot::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum RecoverySnapshotsOrder { #[allow(missing_docs)] // documentation missing in model Asc, #[allow(missing_docs)] // documentation missing in model Desc, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for RecoverySnapshotsOrder { fn from(s: &str) -> Self { match s { "ASC" => RecoverySnapshotsOrder::Asc, "DESC" => RecoverySnapshotsOrder::Desc, other => RecoverySnapshotsOrder::Unknown(other.to_owned()), } } } impl std::str::FromStr for RecoverySnapshotsOrder { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(RecoverySnapshotsOrder::from(s)) } } impl RecoverySnapshotsOrder { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { RecoverySnapshotsOrder::Asc => "ASC", RecoverySnapshotsOrder::Desc => "DESC", RecoverySnapshotsOrder::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
pub fn values() -> &'static [&'static str] { &["ASC", "DESC"] } } impl AsRef<str> for RecoverySnapshotsOrder { fn as_ref(&self) -> &str { self.as_str() } } /// <p>A set of filters by which to return Recovery Snapshots.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeRecoverySnapshotsRequestFilters { /// <p>The start date in a date range query.</p> pub from_date_time: std::option::Option<std::string::String>, /// <p>The end date in a date range query.</p> pub to_date_time: std::option::Option<std::string::String>, } impl DescribeRecoverySnapshotsRequestFilters { /// <p>The start date in a date range query.</p> pub fn from_date_time(&self) -> std::option::Option<&str> { self.from_date_time.as_deref() } /// <p>The end date in a date range query.</p> pub fn to_date_time(&self) -> std::option::Option<&str> { self.to_date_time.as_deref() } } impl std::fmt::Debug for DescribeRecoverySnapshotsRequestFilters { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeRecoverySnapshotsRequestFilters"); formatter.field("from_date_time", &self.from_date_time); formatter.field("to_date_time", &self.to_date_time); formatter.finish() } } /// See [`DescribeRecoverySnapshotsRequestFilters`](crate::model::DescribeRecoverySnapshotsRequestFilters) pub mod describe_recovery_snapshots_request_filters { /// A builder for [`DescribeRecoverySnapshotsRequestFilters`](crate::model::DescribeRecoverySnapshotsRequestFilters) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) from_date_time: std::option::Option<std::string::String>, pub(crate) to_date_time: std::option::Option<std::string::String>, } impl Builder { /// <p>The start date in a date range query.</p> pub fn from_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.from_date_time = Some(input.into()); self } /// <p>The start date in a date range query.</p> pub fn set_from_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.from_date_time = input; self } /// <p>The end date in a date range query.</p> pub fn to_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.to_date_time = Some(input.into()); self } /// <p>The end date in a date range query.</p> pub fn set_to_date_time(mut self, input: std::option::Option<std::string::String>) -> Self { self.to_date_time = input; self } /// Consumes the builder and constructs a [`DescribeRecoverySnapshotsRequestFilters`](crate::model::DescribeRecoverySnapshotsRequestFilters) pub fn build(self) -> crate::model::DescribeRecoverySnapshotsRequestFilters { crate::model::DescribeRecoverySnapshotsRequestFilters { from_date_time: self.from_date_time, to_date_time: self.to_date_time, } } } } impl DescribeRecoverySnapshotsRequestFilters { /// Creates a new builder-style object to manufacture [`DescribeRecoverySnapshotsRequestFilters`](crate::model::DescribeRecoverySnapshotsRequestFilters) pub fn builder() -> crate::model::describe_recovery_snapshots_request_filters::Builder { crate::model::describe_recovery_snapshots_request_filters::Builder::default() } } /// <p>A job is an asynchronous workflow.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct Job { /// <p>The ID of the Job.</p> pub job_id: std::option::Option<std::string::String>, /// <p>The ARN of a Job.</p> pub arn: std::option::Option<std::string::String>, /// <p>The type of the 
Job.</p> pub r#type: std::option::Option<crate::model::JobType>, /// <p>A string representing who initiated the Job.</p> pub initiated_by: std::option::Option<crate::model::InitiatedBy>, /// <p>The date and time of when the Job was created.</p> pub creation_date_time: std::option::Option<std::string::String>, /// <p>The date and time of when the Job ended.</p> pub end_date_time: std::option::Option<std::string::String>, /// <p>The status of the Job.</p> pub status: std::option::Option<crate::model::JobStatus>, /// <p>A list of servers that the Job is acting upon.</p> pub participating_servers: std::option::Option<std::vec::Vec<crate::model::ParticipatingServer>>, /// <p>A list of tags associated with the Job.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, } impl Job { /// <p>The ID of the Job.</p> pub fn job_id(&self) -> std::option::Option<&str> { self.job_id.as_deref() } /// <p>The ARN of a Job.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } /// <p>The type of the Job.</p> pub fn r#type(&self) -> std::option::Option<&crate::model::JobType> { self.r#type.as_ref() } /// <p>A string representing who initiated the Job.</p> pub fn initiated_by(&self) -> std::option::Option<&crate::model::InitiatedBy> { self.initiated_by.as_ref() } /// <p>The date and time of when the Job was created.</p> pub fn creation_date_time(&self) -> std::option::Option<&str> { self.creation_date_time.as_deref() } /// <p>The date and time of when the Job ended.</p> pub fn end_date_time(&self) -> std::option::Option<&str> { self.end_date_time.as_deref() } /// <p>The status of the Job.</p> pub fn status(&self) -> std::option::Option<&crate::model::JobStatus> { self.status.as_ref() } /// <p>A list of servers that the Job is acting upon.</p> pub fn participating_servers( &self, ) -> std::option::Option<&[crate::model::ParticipatingServer]> { self.participating_servers.as_deref() } /// <p>A list of tags associated with the Job.</p> pub fn tags( &self, ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>> { self.tags.as_ref() } } impl std::fmt::Debug for Job { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("Job"); formatter.field("job_id", &self.job_id); formatter.field("arn", &self.arn); formatter.field("r#type", &self.r#type); formatter.field("initiated_by", &self.initiated_by); formatter.field("creation_date_time", &self.creation_date_time); formatter.field("end_date_time", &self.end_date_time); formatter.field("status", &self.status); formatter.field("participating_servers", &self.participating_servers); formatter.field("tags", &"*** Sensitive Data Redacted ***"); formatter.finish() } } /// See [`Job`](crate::model::Job) pub mod job { /// A builder for [`Job`](crate::model::Job) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) job_id: std::option::Option<std::string::String>, pub(crate) arn: std::option::Option<std::string::String>, pub(crate) r#type: std::option::Option<crate::model::JobType>, pub(crate) initiated_by: std::option::Option<crate::model::InitiatedBy>, pub(crate) creation_date_time: std::option::Option<std::string::String>, pub(crate) end_date_time: std::option::Option<std::string::String>, pub(crate) status: std::option::Option<crate::model::JobStatus>, pub(crate) participating_servers: 
std::option::Option<std::vec::Vec<crate::model::ParticipatingServer>>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, } impl Builder { /// <p>The ID of the Job.</p> pub fn job_id(mut self, input: impl Into<std::string::String>) -> Self { self.job_id = Some(input.into()); self } /// <p>The ID of the Job.</p> pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.job_id = input; self } /// <p>The ARN of a Job.</p> pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The ARN of a Job.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// <p>The type of the Job.</p> pub fn r#type(mut self, input: crate::model::JobType) -> Self { self.r#type = Some(input); self } /// <p>The type of the Job.</p> pub fn set_type(mut self, input: std::option::Option<crate::model::JobType>) -> Self { self.r#type = input; self } /// <p>A string representing who initiated the Job.</p> pub fn initiated_by(mut self, input: crate::model::InitiatedBy) -> Self { self.initiated_by = Some(input); self } /// <p>A string representing who initiated the Job.</p> pub fn set_initiated_by( mut self, input: std::option::Option<crate::model::InitiatedBy>, ) -> Self { self.initiated_by = input; self } /// <p>The date and time of when the Job was created.</p> pub fn creation_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.creation_date_time = Some(input.into()); self } /// <p>The date and time of when the Job was created.</p> pub fn set_creation_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.creation_date_time = input; self } /// <p>The date and time of when the Job ended.</p> pub fn end_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.end_date_time = Some(input.into()); self } /// <p>The date and time of when the Job ended.</p> pub fn set_end_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.end_date_time = input; self } /// <p>The status of the Job.</p> pub fn status(mut self, input: crate::model::JobStatus) -> Self { self.status = Some(input); self } /// <p>The status of the Job.</p> pub fn set_status(mut self, input: std::option::Option<crate::model::JobStatus>) -> Self { self.status = input; self } /// Appends an item to `participating_servers`. /// /// To override the contents of this collection use [`set_participating_servers`](Self::set_participating_servers). /// /// <p>A list of servers that the Job is acting upon.</p> pub fn participating_servers(mut self, input: crate::model::ParticipatingServer) -> Self { let mut v = self.participating_servers.unwrap_or_default(); v.push(input); self.participating_servers = Some(v); self } /// <p>A list of servers that the Job is acting upon.</p> pub fn set_participating_servers( mut self, input: std::option::Option<std::vec::Vec<crate::model::ParticipatingServer>>, ) -> Self { self.participating_servers = input; self } /// Adds a key-value pair to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). 
/// /// <p>A list of tags associated with the Job.</p> pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } /// <p>A list of tags associated with the Job.</p> pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// Consumes the builder and constructs a [`Job`](crate::model::Job) pub fn build(self) -> crate::model::Job { crate::model::Job { job_id: self.job_id, arn: self.arn, r#type: self.r#type, initiated_by: self.initiated_by, creation_date_time: self.creation_date_time, end_date_time: self.end_date_time, status: self.status, participating_servers: self.participating_servers, tags: self.tags, } } } } impl Job { /// Creates a new builder-style object to manufacture [`Job`](crate::model::Job) pub fn builder() -> crate::model::job::Builder { crate::model::job::Builder::default() } } /// <p>Represents a server participating in an asynchronous Job.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ParticipatingServer { /// <p>The Source Server ID of a participating server.</p> pub source_server_id: std::option::Option<std::string::String>, /// <p>The Recovery Instance ID of a participating server.</p> pub recovery_instance_id: std::option::Option<std::string::String>, /// <p>The launch status of a participating server.</p> pub launch_status: std::option::Option<crate::model::LaunchStatus>, } impl ParticipatingServer { /// <p>The Source Server ID of a participating server.</p> pub fn source_server_id(&self) -> std::option::Option<&str> { self.source_server_id.as_deref() } /// <p>The Recovery Instance ID of a participating server.</p> pub fn recovery_instance_id(&self) -> std::option::Option<&str> { self.recovery_instance_id.as_deref() } /// <p>The launch status of a participating server.</p> pub fn launch_status(&self) -> std::option::Option<&crate::model::LaunchStatus> { self.launch_status.as_ref() } } impl std::fmt::Debug for ParticipatingServer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ParticipatingServer"); formatter.field("source_server_id", &self.source_server_id); formatter.field("recovery_instance_id", &self.recovery_instance_id); formatter.field("launch_status", &self.launch_status); formatter.finish() } } /// See [`ParticipatingServer`](crate::model::ParticipatingServer) pub mod participating_server { /// A builder for [`ParticipatingServer`](crate::model::ParticipatingServer) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) source_server_id: std::option::Option<std::string::String>, pub(crate) recovery_instance_id: std::option::Option<std::string::String>, pub(crate) launch_status: std::option::Option<crate::model::LaunchStatus>, } impl Builder { /// <p>The Source Server ID of a participating server.</p> pub fn source_server_id(mut self, input: impl Into<std::string::String>) -> Self { self.source_server_id = Some(input.into()); self } /// <p>The Source Server ID of a participating server.</p> pub fn set_source_server_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.source_server_id = input; self } /// <p>The Recovery Instance ID of a participating server.</p> pub fn 
recovery_instance_id(mut self, input: impl Into<std::string::String>) -> Self { self.recovery_instance_id = Some(input.into()); self } /// <p>The Recovery Instance ID of a participating server.</p> pub fn set_recovery_instance_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.recovery_instance_id = input; self } /// <p>The launch status of a participating server.</p> pub fn launch_status(mut self, input: crate::model::LaunchStatus) -> Self { self.launch_status = Some(input); self } /// <p>The launch status of a participating server.</p> pub fn set_launch_status( mut self, input: std::option::Option<crate::model::LaunchStatus>, ) -> Self { self.launch_status = input; self } /// Consumes the builder and constructs a [`ParticipatingServer`](crate::model::ParticipatingServer) pub fn build(self) -> crate::model::ParticipatingServer { crate::model::ParticipatingServer { source_server_id: self.source_server_id, recovery_instance_id: self.recovery_instance_id, launch_status: self.launch_status, } } } } impl ParticipatingServer { /// Creates a new builder-style object to manufacture [`ParticipatingServer`](crate::model::ParticipatingServer) pub fn builder() -> crate::model::participating_server::Builder { crate::model::participating_server::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum LaunchStatus { #[allow(missing_docs)] // documentation missing in model Failed, #[allow(missing_docs)] // documentation missing in model InProgress, #[allow(missing_docs)] // documentation missing in model Launched, #[allow(missing_docs)] // documentation missing in model Pending, #[allow(missing_docs)] // documentation missing in model Terminated, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for LaunchStatus { fn from(s: &str) -> Self { match s { "FAILED" => LaunchStatus::Failed, "IN_PROGRESS" => LaunchStatus::InProgress, "LAUNCHED" => LaunchStatus::Launched, "PENDING" => LaunchStatus::Pending, "TERMINATED" => LaunchStatus::Terminated, other => LaunchStatus::Unknown(other.to_owned()), } } } impl std::str::FromStr for LaunchStatus { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(LaunchStatus::from(s)) } } impl LaunchStatus { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { LaunchStatus::Failed => "FAILED", LaunchStatus::InProgress => "IN_PROGRESS", LaunchStatus::Launched => "LAUNCHED", LaunchStatus::Pending => "PENDING", LaunchStatus::Terminated => "TERMINATED", LaunchStatus::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
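// Editor's sketch (hypothetical helper, not part of the generated model): callers
// typically branch on `LaunchStatus` when polling a Job's participating servers.
// Because the enum is `#[non_exhaustive]`, consumer code needs a wildcard arm,
// which also absorbs `Unknown` values added to the service after codegen.
//
//     fn launch_finished(status: &crate::model::LaunchStatus) -> bool {
//         use crate::model::LaunchStatus;
//         match status {
//             LaunchStatus::Launched | LaunchStatus::Failed | LaunchStatus::Terminated => true,
//             LaunchStatus::Pending | LaunchStatus::InProgress => false,
//             // `Unknown` and any variants added after codegen: be conservative.
//             _ => false,
//         }
//     }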
pub fn values() -> &'static [&'static str] { &["FAILED", "IN_PROGRESS", "LAUNCHED", "PENDING", "TERMINATED"] } } impl AsRef<str> for LaunchStatus { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum JobStatus { #[allow(missing_docs)] // documentation missing in model Completed, #[allow(missing_docs)] // documentation missing in model Pending, #[allow(missing_docs)] // documentation missing in model Started, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for JobStatus { fn from(s: &str) -> Self { match s { "COMPLETED" => JobStatus::Completed, "PENDING" => JobStatus::Pending, "STARTED" => JobStatus::Started, other => JobStatus::Unknown(other.to_owned()), } } } impl std::str::FromStr for JobStatus { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(JobStatus::from(s)) } } impl JobStatus { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { JobStatus::Completed => "COMPLETED", JobStatus::Pending => "PENDING", JobStatus::Started => "STARTED", JobStatus::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["COMPLETED", "PENDING", "STARTED"] } } impl AsRef<str> for JobStatus { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum InitiatedBy { #[allow(missing_docs)] // documentation missing in model Diagnostic, #[allow(missing_docs)] // documentation missing in model Failback, #[allow(missing_docs)] // documentation missing in model StartDrill, #[allow(missing_docs)] // documentation missing in model StartRecovery, #[allow(missing_docs)] // documentation missing in model TerminateRecoveryInstances, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for InitiatedBy { fn from(s: &str) -> Self { match s { "DIAGNOSTIC" => InitiatedBy::Diagnostic, "FAILBACK" => InitiatedBy::Failback, "START_DRILL" => InitiatedBy::StartDrill, "START_RECOVERY" => InitiatedBy::StartRecovery, "TERMINATE_RECOVERY_INSTANCES" => InitiatedBy::TerminateRecoveryInstances, other => InitiatedBy::Unknown(other.to_owned()), } } } impl std::str::FromStr for InitiatedBy { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(InitiatedBy::from(s)) } } impl InitiatedBy { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { InitiatedBy::Diagnostic => "DIAGNOSTIC", InitiatedBy::Failback => "FAILBACK", InitiatedBy::StartDrill => "START_DRILL", InitiatedBy::StartRecovery => "START_RECOVERY", InitiatedBy::TerminateRecoveryInstances => "TERMINATE_RECOVERY_INSTANCES", InitiatedBy::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
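// Editor's sketch (illustrative only): `values()` pairs naturally with `From<&str>`
// for validating raw input before constructing the enum; the helper name below is
// hypothetical. Note that matching is case-sensitive.
//
//     fn parse_initiated_by(raw: &str) -> Option<crate::model::InitiatedBy> {
//         crate::model::InitiatedBy::values()
//             .contains(&raw)
//             .then(|| crate::model::InitiatedBy::from(raw))
//     }
//
//     assert!(parse_initiated_by("START_DRILL").is_some());
//     assert!(parse_initiated_by("start_drill").is_none());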
pub fn values() -> &'static [&'static str] { &[ "DIAGNOSTIC", "FAILBACK", "START_DRILL", "START_RECOVERY", "TERMINATE_RECOVERY_INSTANCES", ] } } impl AsRef<str> for InitiatedBy { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum JobType { #[allow(missing_docs)] // documentation missing in model Launch, #[allow(missing_docs)] // documentation missing in model Terminate, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for JobType { fn from(s: &str) -> Self { match s { "LAUNCH" => JobType::Launch, "TERMINATE" => JobType::Terminate, other => JobType::Unknown(other.to_owned()), } } } impl std::str::FromStr for JobType { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(JobType::from(s)) } } impl JobType { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { JobType::Launch => "LAUNCH", JobType::Terminate => "TERMINATE", JobType::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["LAUNCH", "TERMINATE"] } } impl AsRef<str> for JobType { fn as_ref(&self) -> &str { self.as_str() } } /// <p>An object representing the Source Server to recover.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct StartRecoveryRequestSourceServer { /// <p>The ID of the Source Server you want to recover.</p> pub source_server_id: std::option::Option<std::string::String>, /// <p>The ID of a Recovery Snapshot we want to recover from. Omit this field to launch from the latest data by taking an on-demand snapshot.</p> pub recovery_snapshot_id: std::option::Option<std::string::String>, } impl StartRecoveryRequestSourceServer { /// <p>The ID of the Source Server you want to recover.</p> pub fn source_server_id(&self) -> std::option::Option<&str> { self.source_server_id.as_deref() } /// <p>The ID of a Recovery Snapshot we want to recover from. 
Omit this field to launch from the latest data by taking an on-demand snapshot.</p> pub fn recovery_snapshot_id(&self) -> std::option::Option<&str> { self.recovery_snapshot_id.as_deref() } } impl std::fmt::Debug for StartRecoveryRequestSourceServer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("StartRecoveryRequestSourceServer"); formatter.field("source_server_id", &self.source_server_id); formatter.field("recovery_snapshot_id", &self.recovery_snapshot_id); formatter.finish() } } /// See [`StartRecoveryRequestSourceServer`](crate::model::StartRecoveryRequestSourceServer) pub mod start_recovery_request_source_server { /// A builder for [`StartRecoveryRequestSourceServer`](crate::model::StartRecoveryRequestSourceServer) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) source_server_id: std::option::Option<std::string::String>, pub(crate) recovery_snapshot_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The ID of the Source Server you want to recover.</p> pub fn source_server_id(mut self, input: impl Into<std::string::String>) -> Self { self.source_server_id = Some(input.into()); self } /// <p>The ID of the Source Server you want to recover.</p> pub fn set_source_server_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.source_server_id = input; self } /// <p>The ID of a Recovery Snapshot we want to recover from. Omit this field to launch from the latest data by taking an on-demand snapshot.</p> pub fn recovery_snapshot_id(mut self, input: impl Into<std::string::String>) -> Self { self.recovery_snapshot_id = Some(input.into()); self } /// <p>The ID of a Recovery Snapshot we want to recover from. 
Omit this field to launch from the latest data by taking an on-demand snapshot.</p> pub fn set_recovery_snapshot_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.recovery_snapshot_id = input; self } /// Consumes the builder and constructs a [`StartRecoveryRequestSourceServer`](crate::model::StartRecoveryRequestSourceServer) pub fn build(self) -> crate::model::StartRecoveryRequestSourceServer { crate::model::StartRecoveryRequestSourceServer { source_server_id: self.source_server_id, recovery_snapshot_id: self.recovery_snapshot_id, } } } } impl StartRecoveryRequestSourceServer { /// Creates a new builder-style object to manufacture [`StartRecoveryRequestSourceServer`](crate::model::StartRecoveryRequestSourceServer) pub fn builder() -> crate::model::start_recovery_request_source_server::Builder { crate::model::start_recovery_request_source_server::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct SourceServer { /// <p>The ID of the Source Server.</p> pub source_server_id: std::option::Option<std::string::String>, /// <p>The ARN of the Source Server.</p> pub arn: std::option::Option<std::string::String>, /// <p>The tags associated with the Source Server.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// <p>The ID of the Recovery Instance associated with this Source Server.</p> pub recovery_instance_id: std::option::Option<std::string::String>, /// <p>The status of the last recovery launch of this Source Server.</p> pub last_launch_result: std::option::Option<crate::model::LastLaunchResult>, /// <p>The Data Replication Info of the Source Server.</p> pub data_replication_info: std::option::Option<crate::model::DataReplicationInfo>, /// <p>The lifecycle information of this Source Server.</p> pub life_cycle: std::option::Option<crate::model::LifeCycle>, /// <p>The source properties of the Source Server.</p> pub source_properties: std::option::Option<crate::model::SourceProperties>, } impl SourceServer { /// <p>The ID of the Source Server.</p> pub fn source_server_id(&self) -> std::option::Option<&str> { self.source_server_id.as_deref() } /// <p>The ARN of the Source Server.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } /// <p>The tags associated with the Source Server.</p> pub fn tags( &self, ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>> { self.tags.as_ref() } /// <p>The ID of the Recovery Instance associated with this Source Server.</p> pub fn recovery_instance_id(&self) -> std::option::Option<&str> { self.recovery_instance_id.as_deref() } /// <p>The status of the last recovery launch of this Source Server.</p> pub fn last_launch_result(&self) -> std::option::Option<&crate::model::LastLaunchResult> { self.last_launch_result.as_ref() } /// <p>The Data Replication Info of the Source Server.</p> pub fn data_replication_info(&self) -> std::option::Option<&crate::model::DataReplicationInfo> { self.data_replication_info.as_ref() } /// <p>The lifecycle information of this Source Server.</p> pub fn life_cycle(&self) -> std::option::Option<&crate::model::LifeCycle> { self.life_cycle.as_ref() } /// <p>The source properties of the Source Server.</p> pub fn source_properties(&self) -> std::option::Option<&crate::model::SourceProperties> { self.source_properties.as_ref() } } impl std::fmt::Debug for SourceServer { fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("SourceServer"); formatter.field("source_server_id", &self.source_server_id); formatter.field("arn", &self.arn); formatter.field("tags", &"*** Sensitive Data Redacted ***"); formatter.field("recovery_instance_id", &self.recovery_instance_id); formatter.field("last_launch_result", &self.last_launch_result); formatter.field("data_replication_info", &self.data_replication_info); formatter.field("life_cycle", &self.life_cycle); formatter.field("source_properties", &self.source_properties); formatter.finish() } } /// See [`SourceServer`](crate::model::SourceServer) pub mod source_server { /// A builder for [`SourceServer`](crate::model::SourceServer) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) source_server_id: std::option::Option<std::string::String>, pub(crate) arn: std::option::Option<std::string::String>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) recovery_instance_id: std::option::Option<std::string::String>, pub(crate) last_launch_result: std::option::Option<crate::model::LastLaunchResult>, pub(crate) data_replication_info: std::option::Option<crate::model::DataReplicationInfo>, pub(crate) life_cycle: std::option::Option<crate::model::LifeCycle>, pub(crate) source_properties: std::option::Option<crate::model::SourceProperties>, } impl Builder { /// <p>The ID of the Source Server.</p> pub fn source_server_id(mut self, input: impl Into<std::string::String>) -> Self { self.source_server_id = Some(input.into()); self } /// <p>The ID of the Source Server.</p> pub fn set_source_server_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.source_server_id = input; self } /// <p>The ARN of the Source Server.</p> pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The ARN of the Source Server.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// Adds a key-value pair to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). 
/// /// <p>The tags associated with the Source Server.</p> pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } /// <p>The tags associated with the Source Server.</p> pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// <p>The ID of the Recovery Instance associated with this Source Server.</p> pub fn recovery_instance_id(mut self, input: impl Into<std::string::String>) -> Self { self.recovery_instance_id = Some(input.into()); self } /// <p>The ID of the Recovery Instance associated with this Source Server.</p> pub fn set_recovery_instance_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.recovery_instance_id = input; self } /// <p>The status of the last recovery launch of this Source Server.</p> pub fn last_launch_result(mut self, input: crate::model::LastLaunchResult) -> Self { self.last_launch_result = Some(input); self } /// <p>The status of the last recovery launch of this Source Server.</p> pub fn set_last_launch_result( mut self, input: std::option::Option<crate::model::LastLaunchResult>, ) -> Self { self.last_launch_result = input; self } /// <p>The Data Replication Info of the Source Server.</p> pub fn data_replication_info(mut self, input: crate::model::DataReplicationInfo) -> Self { self.data_replication_info = Some(input); self } /// <p>The Data Replication Info of the Source Server.</p> pub fn set_data_replication_info( mut self, input: std::option::Option<crate::model::DataReplicationInfo>, ) -> Self { self.data_replication_info = input; self } /// <p>The lifecycle information of this Source Server.</p> pub fn life_cycle(mut self, input: crate::model::LifeCycle) -> Self { self.life_cycle = Some(input); self } /// <p>The lifecycle information of this Source Server.</p> pub fn set_life_cycle( mut self, input: std::option::Option<crate::model::LifeCycle>, ) -> Self { self.life_cycle = input; self } /// <p>The source properties of the Source Server.</p> pub fn source_properties(mut self, input: crate::model::SourceProperties) -> Self { self.source_properties = Some(input); self } /// <p>The source properties of the Source Server.</p> pub fn set_source_properties( mut self, input: std::option::Option<crate::model::SourceProperties>, ) -> Self { self.source_properties = input; self } /// Consumes the builder and constructs a [`SourceServer`](crate::model::SourceServer) pub fn build(self) -> crate::model::SourceServer { crate::model::SourceServer { source_server_id: self.source_server_id, arn: self.arn, tags: self.tags, recovery_instance_id: self.recovery_instance_id, last_launch_result: self.last_launch_result, data_replication_info: self.data_replication_info, life_cycle: self.life_cycle, source_properties: self.source_properties, } } } } impl SourceServer { /// Creates a new builder-style object to manufacture [`SourceServer`](crate::model::SourceServer) pub fn builder() -> crate::model::source_server::Builder { crate::model::source_server::Builder::default() } } /// <p>A set of filters by which to return Source Servers.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeSourceServersRequestFilters { /// <p>An array of Source Server IDs that should be returned. 
An empty array means all Source Servers.</p> pub source_server_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>An ID that describes the hardware of the Source Server. This is either an EC2 instance ID, a VMware UUID, or a MAC address.</p> pub hardware_id: std::option::Option<std::string::String>, } impl DescribeSourceServersRequestFilters { /// <p>An array of Source Server IDs that should be returned. An empty array means all Source Servers.</p> pub fn source_server_i_ds(&self) -> std::option::Option<&[std::string::String]> { self.source_server_i_ds.as_deref() } /// <p>An ID that describes the hardware of the Source Server. This is either an EC2 instance ID, a VMware UUID, or a MAC address.</p> pub fn hardware_id(&self) -> std::option::Option<&str> { self.hardware_id.as_deref() } } impl std::fmt::Debug for DescribeSourceServersRequestFilters { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeSourceServersRequestFilters"); formatter.field("source_server_i_ds", &self.source_server_i_ds); formatter.field("hardware_id", &self.hardware_id); formatter.finish() } } /// See [`DescribeSourceServersRequestFilters`](crate::model::DescribeSourceServersRequestFilters) pub mod describe_source_servers_request_filters { /// A builder for [`DescribeSourceServersRequestFilters`](crate::model::DescribeSourceServersRequestFilters) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) source_server_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) hardware_id: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `source_server_i_ds`. /// /// To override the contents of this collection use [`set_source_server_i_ds`](Self::set_source_server_i_ds). /// /// <p>An array of Source Server IDs that should be returned. An empty array means all Source Servers.</p> pub fn source_server_i_ds(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.source_server_i_ds.unwrap_or_default(); v.push(input.into()); self.source_server_i_ds = Some(v); self } /// <p>An array of Source Server IDs that should be returned. An empty array means all Source Servers.</p> pub fn set_source_server_i_ds( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.source_server_i_ds = input; self } /// <p>An ID that describes the hardware of the Source Server. This is either an EC2 instance ID, a VMware UUID, or a MAC address.</p> pub fn hardware_id(mut self, input: impl Into<std::string::String>) -> Self { self.hardware_id = Some(input.into()); self } /// <p>An ID that describes the hardware of the Source Server. 
This is either an EC2 instance ID, a VMware UUID, or a MAC address.</p> pub fn set_hardware_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.hardware_id = input; self } /// Consumes the builder and constructs a [`DescribeSourceServersRequestFilters`](crate::model::DescribeSourceServersRequestFilters) pub fn build(self) -> crate::model::DescribeSourceServersRequestFilters { crate::model::DescribeSourceServersRequestFilters { source_server_i_ds: self.source_server_i_ds, hardware_id: self.hardware_id, } } } } impl DescribeSourceServersRequestFilters { /// Creates a new builder-style object to manufacture [`DescribeSourceServersRequestFilters`](crate::model::DescribeSourceServersRequestFilters) pub fn builder() -> crate::model::describe_source_servers_request_filters::Builder { crate::model::describe_source_servers_request_filters::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ReplicationConfigurationTemplate { /// <p>The Replication Configuration Template ID.</p> pub replication_configuration_template_id: std::option::Option<std::string::String>, /// <p>The Replication Configuration Template ARN.</p> pub arn: std::option::Option<std::string::String>, /// <p>The subnet to be used by the replication staging area.</p> pub staging_area_subnet_id: std::option::Option<std::string::String>, /// <p>Whether to associate the default Elastic Disaster Recovery Security group with the Replication Configuration Template.</p> pub associate_default_security_group: std::option::Option<bool>, /// <p>The security group IDs that will be used by the replication server.</p> pub replication_servers_security_groups_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>The instance type to be used for the replication server.</p> pub replication_server_instance_type: std::option::Option<std::string::String>, /// <p>Whether to use a dedicated Replication Server in the replication staging area.</p> pub use_dedicated_replication_server: std::option::Option<bool>, /// <p>The Staging Disk EBS volume type to be used during replication.</p> pub default_large_staging_disk_type: std::option::Option<crate::model::ReplicationConfigurationDefaultLargeStagingDiskType>, /// <p>The type of EBS encryption to be used during replication.</p> pub ebs_encryption: std::option::Option<crate::model::ReplicationConfigurationEbsEncryption>, /// <p>The ARN of the EBS encryption key to be used during replication.</p> pub ebs_encryption_key_arn: std::option::Option<std::string::String>, /// <p>Configure bandwidth throttling for the outbound data transfer rate of the Source Server in Mbps.</p> pub bandwidth_throttling: i64, /// <p>The data plane routing mechanism that will be used for replication.</p> pub data_plane_routing: std::option::Option<crate::model::ReplicationConfigurationDataPlaneRouting>, /// <p>Whether to create a Public IP for the Recovery Instance by default.</p> pub create_public_ip: std::option::Option<bool>, /// <p>A set of tags to be associated with all resources created in the replication staging area: EC2 replication server, EBS volumes, EBS snapshots, etc.</p> pub staging_area_tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// <p>A set of tags to be associated with the Replication Configuration Template resource.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// 
<p>The Point in time (PIT) policy to manage snapshots taken during replication.</p> pub pit_policy: std::option::Option<std::vec::Vec<crate::model::PitPolicyRule>>, } impl ReplicationConfigurationTemplate { /// <p>The Replication Configuration Template ID.</p> pub fn replication_configuration_template_id(&self) -> std::option::Option<&str> { self.replication_configuration_template_id.as_deref() } /// <p>The Replication Configuration Template ARN.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } /// <p>The subnet to be used by the replication staging area.</p> pub fn staging_area_subnet_id(&self) -> std::option::Option<&str> { self.staging_area_subnet_id.as_deref() } /// <p>Whether to associate the default Elastic Disaster Recovery Security group with the Replication Configuration Template.</p> pub fn associate_default_security_group(&self) -> std::option::Option<bool> { self.associate_default_security_group } /// <p>The security group IDs that will be used by the replication server.</p> pub fn replication_servers_security_groups_i_ds( &self, ) -> std::option::Option<&[std::string::String]> { self.replication_servers_security_groups_i_ds.as_deref() } /// <p>The instance type to be used for the replication server.</p> pub fn replication_server_instance_type(&self) -> std::option::Option<&str> { self.replication_server_instance_type.as_deref() } /// <p>Whether to use a dedicated Replication Server in the replication staging area.</p> pub fn use_dedicated_replication_server(&self) -> std::option::Option<bool> { self.use_dedicated_replication_server } /// <p>The Staging Disk EBS volume type to be used during replication.</p> pub fn default_large_staging_disk_type( &self, ) -> std::option::Option<&crate::model::ReplicationConfigurationDefaultLargeStagingDiskType> { self.default_large_staging_disk_type.as_ref() } /// <p>The type of EBS encryption to be used during replication.</p> pub fn ebs_encryption( &self, ) -> std::option::Option<&crate::model::ReplicationConfigurationEbsEncryption> { self.ebs_encryption.as_ref() } /// <p>The ARN of the EBS encryption key to be used during replication.</p> pub fn ebs_encryption_key_arn(&self) -> std::option::Option<&str> { self.ebs_encryption_key_arn.as_deref() } /// <p>Configure bandwidth throttling for the outbound data transfer rate of the Source Server in Mbps.</p> pub fn bandwidth_throttling(&self) -> i64 { self.bandwidth_throttling } /// <p>The data plane routing mechanism that will be used for replication.</p> pub fn data_plane_routing( &self, ) -> std::option::Option<&crate::model::ReplicationConfigurationDataPlaneRouting> { self.data_plane_routing.as_ref() } /// <p>Whether to create a Public IP for the Recovery Instance by default.</p> pub fn create_public_ip(&self) -> std::option::Option<bool> { self.create_public_ip } /// <p>A set of tags to be associated with all resources created in the replication staging area: EC2 replication server, EBS volumes, EBS snapshots, etc.</p> pub fn staging_area_tags( &self, ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>> { self.staging_area_tags.as_ref() } /// <p>A set of tags to be associated with the Replication Configuration Template resource.</p> pub fn tags( &self, ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>> { self.tags.as_ref() } /// <p>The Point in time (PIT) policy to manage snapshots taken during replication.</p> pub fn pit_policy(&self) -> 
std::option::Option<&[crate::model::PitPolicyRule]> { self.pit_policy.as_deref() } } impl std::fmt::Debug for ReplicationConfigurationTemplate { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ReplicationConfigurationTemplate"); formatter.field( "replication_configuration_template_id", &self.replication_configuration_template_id, ); formatter.field("arn", &self.arn); formatter.field("staging_area_subnet_id", &self.staging_area_subnet_id); formatter.field( "associate_default_security_group", &self.associate_default_security_group, ); formatter.field( "replication_servers_security_groups_i_ds", &self.replication_servers_security_groups_i_ds, ); formatter.field( "replication_server_instance_type", &self.replication_server_instance_type, ); formatter.field( "use_dedicated_replication_server", &self.use_dedicated_replication_server, ); formatter.field( "default_large_staging_disk_type", &self.default_large_staging_disk_type, ); formatter.field("ebs_encryption", &self.ebs_encryption); formatter.field("ebs_encryption_key_arn", &self.ebs_encryption_key_arn); formatter.field("bandwidth_throttling", &self.bandwidth_throttling); formatter.field("data_plane_routing", &self.data_plane_routing); formatter.field("create_public_ip", &self.create_public_ip); formatter.field("staging_area_tags", &"*** Sensitive Data Redacted ***"); formatter.field("tags", &"*** Sensitive Data Redacted ***"); formatter.field("pit_policy", &self.pit_policy); formatter.finish() } } /// See [`ReplicationConfigurationTemplate`](crate::model::ReplicationConfigurationTemplate) pub mod replication_configuration_template { /// A builder for [`ReplicationConfigurationTemplate`](crate::model::ReplicationConfigurationTemplate) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) replication_configuration_template_id: std::option::Option<std::string::String>, pub(crate) arn: std::option::Option<std::string::String>, pub(crate) staging_area_subnet_id: std::option::Option<std::string::String>, pub(crate) associate_default_security_group: std::option::Option<bool>, pub(crate) replication_servers_security_groups_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) replication_server_instance_type: std::option::Option<std::string::String>, pub(crate) use_dedicated_replication_server: std::option::Option<bool>, pub(crate) default_large_staging_disk_type: std::option::Option<crate::model::ReplicationConfigurationDefaultLargeStagingDiskType>, pub(crate) ebs_encryption: std::option::Option<crate::model::ReplicationConfigurationEbsEncryption>, pub(crate) ebs_encryption_key_arn: std::option::Option<std::string::String>, pub(crate) bandwidth_throttling: std::option::Option<i64>, pub(crate) data_plane_routing: std::option::Option<crate::model::ReplicationConfigurationDataPlaneRouting>, pub(crate) create_public_ip: std::option::Option<bool>, pub(crate) staging_area_tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) pit_policy: std::option::Option<std::vec::Vec<crate::model::PitPolicyRule>>, } impl Builder { /// <p>The Replication Configuration Template ID.</p> pub fn replication_configuration_template_id( mut self, input: impl Into<std::string::String>, ) -> Self { self.replication_configuration_template_id = 
Some(input.into()); self } /// <p>The Replication Configuration Template ID.</p> pub fn set_replication_configuration_template_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.replication_configuration_template_id = input; self } /// <p>The Replication Configuration Template ARN.</p> pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The Replication Configuration Template ARN.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// <p>The subnet to be used by the replication staging area.</p> pub fn staging_area_subnet_id(mut self, input: impl Into<std::string::String>) -> Self { self.staging_area_subnet_id = Some(input.into()); self } /// <p>The subnet to be used by the replication staging area.</p> pub fn set_staging_area_subnet_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.staging_area_subnet_id = input; self } /// <p>Whether to associate the default Elastic Disaster Recovery Security group with the Replication Configuration Template.</p> pub fn associate_default_security_group(mut self, input: bool) -> Self { self.associate_default_security_group = Some(input); self } /// <p>Whether to associate the default Elastic Disaster Recovery Security group with the Replication Configuration Template.</p> pub fn set_associate_default_security_group( mut self, input: std::option::Option<bool>, ) -> Self { self.associate_default_security_group = input; self } /// Appends an item to `replication_servers_security_groups_i_ds`. /// /// To override the contents of this collection use [`set_replication_servers_security_groups_i_ds`](Self::set_replication_servers_security_groups_i_ds). 
/// /// <p>The security group IDs that will be used by the replication server.</p> pub fn replication_servers_security_groups_i_ds( mut self, input: impl Into<std::string::String>, ) -> Self { let mut v = self .replication_servers_security_groups_i_ds .unwrap_or_default(); v.push(input.into()); self.replication_servers_security_groups_i_ds = Some(v); self } /// <p>The security group IDs that will be used by the replication server.</p> pub fn set_replication_servers_security_groups_i_ds( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.replication_servers_security_groups_i_ds = input; self } /// <p>The instance type to be used for the replication server.</p> pub fn replication_server_instance_type( mut self, input: impl Into<std::string::String>, ) -> Self { self.replication_server_instance_type = Some(input.into()); self } /// <p>The instance type to be used for the replication server.</p> pub fn set_replication_server_instance_type( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.replication_server_instance_type = input; self } /// <p>Whether to use a dedicated Replication Server in the replication staging area.</p> pub fn use_dedicated_replication_server(mut self, input: bool) -> Self { self.use_dedicated_replication_server = Some(input); self } /// <p>Whether to use a dedicated Replication Server in the replication staging area.</p> pub fn set_use_dedicated_replication_server( mut self, input: std::option::Option<bool>, ) -> Self { self.use_dedicated_replication_server = input; self } /// <p>The Staging Disk EBS volume type to be used during replication.</p> pub fn default_large_staging_disk_type( mut self, input: crate::model::ReplicationConfigurationDefaultLargeStagingDiskType, ) -> Self { self.default_large_staging_disk_type = Some(input); self } /// <p>The Staging Disk EBS volume type to be used during replication.</p> pub fn set_default_large_staging_disk_type( mut self, input: std::option::Option< crate::model::ReplicationConfigurationDefaultLargeStagingDiskType, >, ) -> Self { self.default_large_staging_disk_type = input; self } /// <p>The type of EBS encryption to be used during replication.</p> pub fn ebs_encryption( mut self, input: crate::model::ReplicationConfigurationEbsEncryption, ) -> Self { self.ebs_encryption = Some(input); self } /// <p>The type of EBS encryption to be used during replication.</p> pub fn set_ebs_encryption( mut self, input: std::option::Option<crate::model::ReplicationConfigurationEbsEncryption>, ) -> Self { self.ebs_encryption = input; self } /// <p>The ARN of the EBS encryption key to be used during replication.</p> pub fn ebs_encryption_key_arn(mut self, input: impl Into<std::string::String>) -> Self { self.ebs_encryption_key_arn = Some(input.into()); self } /// <p>The ARN of the EBS encryption key to be used during replication.</p> pub fn set_ebs_encryption_key_arn( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.ebs_encryption_key_arn = input; self } /// <p>Configure bandwidth throttling for the outbound data transfer rate of the Source Server in Mbps.</p> pub fn bandwidth_throttling(mut self, input: i64) -> Self { self.bandwidth_throttling = Some(input); self } /// <p>Configure bandwidth throttling for the outbound data transfer rate of the Source Server in Mbps.</p> pub fn set_bandwidth_throttling(mut self, input: std::option::Option<i64>) -> Self { self.bandwidth_throttling = input; self } /// <p>The data plane routing mechanism that will be used 
for replication.</p> pub fn data_plane_routing( mut self, input: crate::model::ReplicationConfigurationDataPlaneRouting, ) -> Self { self.data_plane_routing = Some(input); self } /// <p>The data plane routing mechanism that will be used for replication.</p> pub fn set_data_plane_routing( mut self, input: std::option::Option<crate::model::ReplicationConfigurationDataPlaneRouting>, ) -> Self { self.data_plane_routing = input; self } /// <p>Whether to create a Public IP for the Recovery Instance by default.</p> pub fn create_public_ip(mut self, input: bool) -> Self { self.create_public_ip = Some(input); self } /// <p>Whether to create a Public IP for the Recovery Instance by default.</p> pub fn set_create_public_ip(mut self, input: std::option::Option<bool>) -> Self { self.create_public_ip = input; self } /// Adds a key-value pair to `staging_area_tags`. /// /// To override the contents of this collection use [`set_staging_area_tags`](Self::set_staging_area_tags). /// /// <p>A set of tags to be associated with all resources created in the replication staging area: EC2 replication server, EBS volumes, EBS snapshots, etc.</p> pub fn staging_area_tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.staging_area_tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.staging_area_tags = Some(hash_map); self } /// <p>A set of tags to be associated with all resources created in the replication staging area: EC2 replication server, EBS volumes, EBS snapshots, etc.</p> pub fn set_staging_area_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.staging_area_tags = input; self } /// Adds a key-value pair to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>A set of tags to be associated with the Replication Configuration Template resource.</p> pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } /// <p>A set of tags to be associated with the Replication Configuration Template resource.</p> pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// Appends an item to `pit_policy`. /// /// To override the contents of this collection use [`set_pit_policy`](Self::set_pit_policy). 
/// /// <p>The Point in time (PIT) policy to manage snapshots taken during replication.</p> pub fn pit_policy(mut self, input: crate::model::PitPolicyRule) -> Self { let mut v = self.pit_policy.unwrap_or_default(); v.push(input); self.pit_policy = Some(v); self } /// <p>The Point in time (PIT) policy to manage snapshots taken during replication.</p> pub fn set_pit_policy( mut self, input: std::option::Option<std::vec::Vec<crate::model::PitPolicyRule>>, ) -> Self { self.pit_policy = input; self } /// Consumes the builder and constructs a [`ReplicationConfigurationTemplate`](crate::model::ReplicationConfigurationTemplate) pub fn build(self) -> crate::model::ReplicationConfigurationTemplate { crate::model::ReplicationConfigurationTemplate { replication_configuration_template_id: self.replication_configuration_template_id, arn: self.arn, staging_area_subnet_id: self.staging_area_subnet_id, associate_default_security_group: self.associate_default_security_group, replication_servers_security_groups_i_ds: self .replication_servers_security_groups_i_ds, replication_server_instance_type: self.replication_server_instance_type, use_dedicated_replication_server: self.use_dedicated_replication_server, default_large_staging_disk_type: self.default_large_staging_disk_type, ebs_encryption: self.ebs_encryption, ebs_encryption_key_arn: self.ebs_encryption_key_arn, bandwidth_throttling: self.bandwidth_throttling.unwrap_or_default(), data_plane_routing: self.data_plane_routing, create_public_ip: self.create_public_ip, staging_area_tags: self.staging_area_tags, tags: self.tags, pit_policy: self.pit_policy, } } } } impl ReplicationConfigurationTemplate { /// Creates a new builder-style object to manufacture [`ReplicationConfigurationTemplate`](crate::model::ReplicationConfigurationTemplate) pub fn builder() -> crate::model::replication_configuration_template::Builder { crate::model::replication_configuration_template::Builder::default() } } /// <p>A Recovery Instance is a replica of a Source Server running on EC2.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstance { /// <p>The EC2 instance ID of the Recovery Instance.</p> pub ec2_instance_id: std::option::Option<std::string::String>, /// <p>The state of the EC2 instance for this Recovery Instance.</p> pub ec2_instance_state: std::option::Option<crate::model::Ec2InstanceState>, /// <p>The ID of the Job that created the Recovery Instance.</p> pub job_id: std::option::Option<std::string::String>, /// <p>The ID of the Recovery Instance.</p> pub recovery_instance_id: std::option::Option<std::string::String>, /// <p>The Source Server ID that this Recovery Instance is associated with.</p> pub source_server_id: std::option::Option<std::string::String>, /// <p>The ARN of the Recovery Instance.</p> pub arn: std::option::Option<std::string::String>, /// <p>An array of tags that are associated with the Recovery Instance.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// <p>An object representing failback related information of the Recovery Instance.</p> pub failback: std::option::Option<crate::model::RecoveryInstanceFailback>, /// <p>The Data Replication Info of the Recovery Instance.</p> pub data_replication_info: std::option::Option<crate::model::RecoveryInstanceDataReplicationInfo>, /// <p>Properties of the Recovery Instance machine.</p> pub recovery_instance_properties: std::option::Option<crate::model::RecoveryInstanceProperties>, /// <p>The date and time of the 
Point in Time (PIT) snapshot that this Recovery Instance was launched from.</p> pub point_in_time_snapshot_date_time: std::option::Option<std::string::String>, /// <p>Whether this Recovery Instance was created for a drill or for an actual Recovery event.</p> pub is_drill: std::option::Option<bool>, } impl RecoveryInstance { /// <p>The EC2 instance ID of the Recovery Instance.</p> pub fn ec2_instance_id(&self) -> std::option::Option<&str> { self.ec2_instance_id.as_deref() } /// <p>The state of the EC2 instance for this Recovery Instance.</p> pub fn ec2_instance_state(&self) -> std::option::Option<&crate::model::Ec2InstanceState> { self.ec2_instance_state.as_ref() } /// <p>The ID of the Job that created the Recovery Instance.</p> pub fn job_id(&self) -> std::option::Option<&str> { self.job_id.as_deref() } /// <p>The ID of the Recovery Instance.</p> pub fn recovery_instance_id(&self) -> std::option::Option<&str> { self.recovery_instance_id.as_deref() } /// <p>The Source Server ID that this Recovery Instance is associated with.</p> pub fn source_server_id(&self) -> std::option::Option<&str> { self.source_server_id.as_deref() } /// <p>The ARN of the Recovery Instance.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } /// <p>An array of tags that are associated with the Recovery Instance.</p> pub fn tags( &self, ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>> { self.tags.as_ref() } /// <p>An object representing failback related information of the Recovery Instance.</p> pub fn failback(&self) -> std::option::Option<&crate::model::RecoveryInstanceFailback> { self.failback.as_ref() } /// <p>The Data Replication Info of the Recovery Instance.</p> pub fn data_replication_info( &self, ) -> std::option::Option<&crate::model::RecoveryInstanceDataReplicationInfo> { self.data_replication_info.as_ref() } /// <p>Properties of the Recovery Instance machine.</p> pub fn recovery_instance_properties( &self, ) -> std::option::Option<&crate::model::RecoveryInstanceProperties> { self.recovery_instance_properties.as_ref() } /// <p>The date and time of the Point in Time (PIT) snapshot that this Recovery Instance was launched from.</p> pub fn point_in_time_snapshot_date_time(&self) -> std::option::Option<&str> { self.point_in_time_snapshot_date_time.as_deref() } /// <p>Whether this Recovery Instance was created for a drill or for an actual Recovery event.</p> pub fn is_drill(&self) -> std::option::Option<bool> { self.is_drill } } impl std::fmt::Debug for RecoveryInstance { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstance"); formatter.field("ec2_instance_id", &self.ec2_instance_id); formatter.field("ec2_instance_state", &self.ec2_instance_state); formatter.field("job_id", &self.job_id); formatter.field("recovery_instance_id", &self.recovery_instance_id); formatter.field("source_server_id", &self.source_server_id); formatter.field("arn", &self.arn); formatter.field("tags", &"*** Sensitive Data Redacted ***"); formatter.field("failback", &self.failback); formatter.field("data_replication_info", &self.data_replication_info); formatter.field( "recovery_instance_properties", &self.recovery_instance_properties, ); formatter.field( "point_in_time_snapshot_date_time", &self.point_in_time_snapshot_date_time, ); formatter.field("is_drill", &self.is_drill); formatter.finish() } } /// See [`RecoveryInstance`](crate::model::RecoveryInstance) pub mod recovery_instance { /// A builder for 
[`RecoveryInstance`](crate::model::RecoveryInstance) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) ec2_instance_id: std::option::Option<std::string::String>, pub(crate) ec2_instance_state: std::option::Option<crate::model::Ec2InstanceState>, pub(crate) job_id: std::option::Option<std::string::String>, pub(crate) recovery_instance_id: std::option::Option<std::string::String>, pub(crate) source_server_id: std::option::Option<std::string::String>, pub(crate) arn: std::option::Option<std::string::String>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) failback: std::option::Option<crate::model::RecoveryInstanceFailback>, pub(crate) data_replication_info: std::option::Option<crate::model::RecoveryInstanceDataReplicationInfo>, pub(crate) recovery_instance_properties: std::option::Option<crate::model::RecoveryInstanceProperties>, pub(crate) point_in_time_snapshot_date_time: std::option::Option<std::string::String>, pub(crate) is_drill: std::option::Option<bool>, } impl Builder { /// <p>The EC2 instance ID of the Recovery Instance.</p> pub fn ec2_instance_id(mut self, input: impl Into<std::string::String>) -> Self { self.ec2_instance_id = Some(input.into()); self } /// <p>The EC2 instance ID of the Recovery Instance.</p> pub fn set_ec2_instance_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.ec2_instance_id = input; self } /// <p>The state of the EC2 instance for this Recovery Instance.</p> pub fn ec2_instance_state(mut self, input: crate::model::Ec2InstanceState) -> Self { self.ec2_instance_state = Some(input); self } /// <p>The state of the EC2 instance for this Recovery Instance.</p> pub fn set_ec2_instance_state( mut self, input: std::option::Option<crate::model::Ec2InstanceState>, ) -> Self { self.ec2_instance_state = input; self } /// <p>The ID of the Job that created the Recovery Instance.</p> pub fn job_id(mut self, input: impl Into<std::string::String>) -> Self { self.job_id = Some(input.into()); self } /// <p>The ID of the Job that created the Recovery Instance.</p> pub fn set_job_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.job_id = input; self } /// <p>The ID of the Recovery Instance.</p> pub fn recovery_instance_id(mut self, input: impl Into<std::string::String>) -> Self { self.recovery_instance_id = Some(input.into()); self } /// <p>The ID of the Recovery Instance.</p> pub fn set_recovery_instance_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.recovery_instance_id = input; self } /// <p>The Source Server ID that this Recovery Instance is associated with.</p> pub fn source_server_id(mut self, input: impl Into<std::string::String>) -> Self { self.source_server_id = Some(input.into()); self } /// <p>The Source Server ID that this Recovery Instance is associated with.</p> pub fn set_source_server_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.source_server_id = input; self } /// <p>The ARN of the Recovery Instance.</p> pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The ARN of the Recovery Instance.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// Adds a key-value pair to `tags`. 
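        ///
        /// A hedged usage sketch (the crate name `aws_sdk_drs` and the tag keys/values are
        /// assumptions for illustration, not part of the model):
        /// ```no_run
        /// let builder = aws_sdk_drs::model::RecoveryInstance::builder()
        ///     .tags("Environment", "drill") // hypothetical key-value pair
        ///     .tags("Owner", "dr-team");    // repeated calls insert additional entries
        /// let _recovery_instance = builder.build();
        /// ```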
/// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>An array of tags that are associated with the Recovery Instance.</p> pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } /// <p>An array of tags that are associated with the Recovery Instance.</p> pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// <p>An object representing failback related information of the Recovery Instance.</p> pub fn failback(mut self, input: crate::model::RecoveryInstanceFailback) -> Self { self.failback = Some(input); self } /// <p>An object representing failback related information of the Recovery Instance.</p> pub fn set_failback( mut self, input: std::option::Option<crate::model::RecoveryInstanceFailback>, ) -> Self { self.failback = input; self } /// <p>The Data Replication Info of the Recovery Instance.</p> pub fn data_replication_info( mut self, input: crate::model::RecoveryInstanceDataReplicationInfo, ) -> Self { self.data_replication_info = Some(input); self } /// <p>The Data Replication Info of the Recovery Instance.</p> pub fn set_data_replication_info( mut self, input: std::option::Option<crate::model::RecoveryInstanceDataReplicationInfo>, ) -> Self { self.data_replication_info = input; self } /// <p>Properties of the Recovery Instance machine.</p> pub fn recovery_instance_properties( mut self, input: crate::model::RecoveryInstanceProperties, ) -> Self { self.recovery_instance_properties = Some(input); self } /// <p>Properties of the Recovery Instance machine.</p> pub fn set_recovery_instance_properties( mut self, input: std::option::Option<crate::model::RecoveryInstanceProperties>, ) -> Self { self.recovery_instance_properties = input; self } /// <p>The date and time of the Point in Time (PIT) snapshot that this Recovery Instance was launched from.</p> pub fn point_in_time_snapshot_date_time( mut self, input: impl Into<std::string::String>, ) -> Self { self.point_in_time_snapshot_date_time = Some(input.into()); self } /// <p>The date and time of the Point in Time (PIT) snapshot that this Recovery Instance was launched from.</p> pub fn set_point_in_time_snapshot_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.point_in_time_snapshot_date_time = input; self } /// <p>Whether this Recovery Instance was created for a drill or for an actual Recovery event.</p> pub fn is_drill(mut self, input: bool) -> Self { self.is_drill = Some(input); self } /// <p>Whether this Recovery Instance was created for a drill or for an actual Recovery event.</p> pub fn set_is_drill(mut self, input: std::option::Option<bool>) -> Self { self.is_drill = input; self } /// Consumes the builder and constructs a [`RecoveryInstance`](crate::model::RecoveryInstance) pub fn build(self) -> crate::model::RecoveryInstance { crate::model::RecoveryInstance { ec2_instance_id: self.ec2_instance_id, ec2_instance_state: self.ec2_instance_state, job_id: self.job_id, recovery_instance_id: self.recovery_instance_id, source_server_id: self.source_server_id, arn: self.arn, tags: self.tags, failback: self.failback, data_replication_info: self.data_replication_info, recovery_instance_properties: self.recovery_instance_properties, point_in_time_snapshot_date_time: 
self.point_in_time_snapshot_date_time, is_drill: self.is_drill, } } } } impl RecoveryInstance { /// Creates a new builder-style object to manufacture [`RecoveryInstance`](crate::model::RecoveryInstance) pub fn builder() -> crate::model::recovery_instance::Builder { crate::model::recovery_instance::Builder::default() } } /// <p>Properties of the Recovery Instance machine.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstanceProperties { /// <p>The date and time the Recovery Instance properties were last updated on.</p> pub last_updated_date_time: std::option::Option<std::string::String>, /// <p>Hints used to uniquely identify a machine.</p> pub identification_hints: std::option::Option<crate::model::IdentificationHints>, /// <p>An array of network interfaces.</p> pub network_interfaces: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>, /// <p>An array of disks.</p> pub disks: std::option::Option<std::vec::Vec<crate::model::RecoveryInstanceDisk>>, /// <p>An array of CPUs.</p> pub cpus: std::option::Option<std::vec::Vec<crate::model::Cpu>>, /// <p>The amount of RAM in bytes.</p> pub ram_bytes: i64, /// <p>Operating system.</p> pub os: std::option::Option<crate::model::Os>, } impl RecoveryInstanceProperties { /// <p>The date and time the Recovery Instance properties were last updated on.</p> pub fn last_updated_date_time(&self) -> std::option::Option<&str> { self.last_updated_date_time.as_deref() } /// <p>Hints used to uniquely identify a machine.</p> pub fn identification_hints(&self) -> std::option::Option<&crate::model::IdentificationHints> { self.identification_hints.as_ref() } /// <p>An array of network interfaces.</p> pub fn network_interfaces(&self) -> std::option::Option<&[crate::model::NetworkInterface]> { self.network_interfaces.as_deref() } /// <p>An array of disks.</p> pub fn disks(&self) -> std::option::Option<&[crate::model::RecoveryInstanceDisk]> { self.disks.as_deref() } /// <p>An array of CPUs.</p> pub fn cpus(&self) -> std::option::Option<&[crate::model::Cpu]> { self.cpus.as_deref() } /// <p>The amount of RAM in bytes.</p> pub fn ram_bytes(&self) -> i64 { self.ram_bytes } /// <p>Operating system.</p> pub fn os(&self) -> std::option::Option<&crate::model::Os> { self.os.as_ref() } } impl std::fmt::Debug for RecoveryInstanceProperties { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstanceProperties"); formatter.field("last_updated_date_time", &self.last_updated_date_time); formatter.field("identification_hints", &self.identification_hints); formatter.field("network_interfaces", &self.network_interfaces); formatter.field("disks", &self.disks); formatter.field("cpus", &self.cpus); formatter.field("ram_bytes", &self.ram_bytes); formatter.field("os", &self.os); formatter.finish() } } /// See [`RecoveryInstanceProperties`](crate::model::RecoveryInstanceProperties) pub mod recovery_instance_properties { /// A builder for [`RecoveryInstanceProperties`](crate::model::RecoveryInstanceProperties) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) last_updated_date_time: std::option::Option<std::string::String>, pub(crate) identification_hints: std::option::Option<crate::model::IdentificationHints>, pub(crate) network_interfaces: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>, pub(crate) disks: 
std::option::Option<std::vec::Vec<crate::model::RecoveryInstanceDisk>>, pub(crate) cpus: std::option::Option<std::vec::Vec<crate::model::Cpu>>, pub(crate) ram_bytes: std::option::Option<i64>, pub(crate) os: std::option::Option<crate::model::Os>, } impl Builder { /// <p>The date and time the Recovery Instance properties were last updated on.</p> pub fn last_updated_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.last_updated_date_time = Some(input.into()); self } /// <p>The date and time the Recovery Instance properties were last updated on.</p> pub fn set_last_updated_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.last_updated_date_time = input; self } /// <p>Hints used to uniquely identify a machine.</p> pub fn identification_hints(mut self, input: crate::model::IdentificationHints) -> Self { self.identification_hints = Some(input); self } /// <p>Hints used to uniquely identify a machine.</p> pub fn set_identification_hints( mut self, input: std::option::Option<crate::model::IdentificationHints>, ) -> Self { self.identification_hints = input; self } /// Appends an item to `network_interfaces`. /// /// To override the contents of this collection use [`set_network_interfaces`](Self::set_network_interfaces). /// /// <p>An array of network interfaces.</p> pub fn network_interfaces(mut self, input: crate::model::NetworkInterface) -> Self { let mut v = self.network_interfaces.unwrap_or_default(); v.push(input); self.network_interfaces = Some(v); self } /// <p>An array of network interfaces.</p> pub fn set_network_interfaces( mut self, input: std::option::Option<std::vec::Vec<crate::model::NetworkInterface>>, ) -> Self { self.network_interfaces = input; self } /// Appends an item to `disks`. /// /// To override the contents of this collection use [`set_disks`](Self::set_disks). /// /// <p>An array of disks.</p> pub fn disks(mut self, input: crate::model::RecoveryInstanceDisk) -> Self { let mut v = self.disks.unwrap_or_default(); v.push(input); self.disks = Some(v); self } /// <p>An array of disks.</p> pub fn set_disks( mut self, input: std::option::Option<std::vec::Vec<crate::model::RecoveryInstanceDisk>>, ) -> Self { self.disks = input; self } /// Appends an item to `cpus`. /// /// To override the contents of this collection use [`set_cpus`](Self::set_cpus). 
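        ///
        /// A hedged sketch of the override path (assumes this file is part of the
        /// `aws_sdk_drs` crate):
        /// ```no_run
        /// // Passing Some(Vec::new()) through the setter replaces anything appended so far.
        /// let _props = aws_sdk_drs::model::RecoveryInstanceProperties::builder()
        ///     .set_cpus(Some(Vec::new()))
        ///     .build();
        /// ```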
/// /// <p>An array of CPUs.</p> pub fn cpus(mut self, input: crate::model::Cpu) -> Self { let mut v = self.cpus.unwrap_or_default(); v.push(input); self.cpus = Some(v); self } /// <p>An array of CPUs.</p> pub fn set_cpus( mut self, input: std::option::Option<std::vec::Vec<crate::model::Cpu>>, ) -> Self { self.cpus = input; self } /// <p>The amount of RAM in bytes.</p> pub fn ram_bytes(mut self, input: i64) -> Self { self.ram_bytes = Some(input); self } /// <p>The amount of RAM in bytes.</p> pub fn set_ram_bytes(mut self, input: std::option::Option<i64>) -> Self { self.ram_bytes = input; self } /// <p>Operating system.</p> pub fn os(mut self, input: crate::model::Os) -> Self { self.os = Some(input); self } /// <p>Operating system.</p> pub fn set_os(mut self, input: std::option::Option<crate::model::Os>) -> Self { self.os = input; self } /// Consumes the builder and constructs a [`RecoveryInstanceProperties`](crate::model::RecoveryInstanceProperties) pub fn build(self) -> crate::model::RecoveryInstanceProperties { crate::model::RecoveryInstanceProperties { last_updated_date_time: self.last_updated_date_time, identification_hints: self.identification_hints, network_interfaces: self.network_interfaces, disks: self.disks, cpus: self.cpus, ram_bytes: self.ram_bytes.unwrap_or_default(), os: self.os, } } } } impl RecoveryInstanceProperties { /// Creates a new builder-style object to manufacture [`RecoveryInstanceProperties`](crate::model::RecoveryInstanceProperties) pub fn builder() -> crate::model::recovery_instance_properties::Builder { crate::model::recovery_instance_properties::Builder::default() } } /// <p>An object representing a block storage device on the Recovery Instance.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstanceDisk { /// <p>The internal device name of this disk. This is the name that is visible on the machine itself and not from the EC2 console.</p> pub internal_device_name: std::option::Option<std::string::String>, /// <p>The amount of storage on the disk in bytes.</p> pub bytes: i64, /// <p>The EBS Volume ID of this disk.</p> pub ebs_volume_id: std::option::Option<std::string::String>, } impl RecoveryInstanceDisk { /// <p>The internal device name of this disk. 
This is the name that is visible on the machine itself and not from the EC2 console.</p> pub fn internal_device_name(&self) -> std::option::Option<&str> { self.internal_device_name.as_deref() } /// <p>The amount of storage on the disk in bytes.</p> pub fn bytes(&self) -> i64 { self.bytes } /// <p>The EBS Volume ID of this disk.</p> pub fn ebs_volume_id(&self) -> std::option::Option<&str> { self.ebs_volume_id.as_deref() } } impl std::fmt::Debug for RecoveryInstanceDisk { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstanceDisk"); formatter.field("internal_device_name", &self.internal_device_name); formatter.field("bytes", &self.bytes); formatter.field("ebs_volume_id", &self.ebs_volume_id); formatter.finish() } } /// See [`RecoveryInstanceDisk`](crate::model::RecoveryInstanceDisk) pub mod recovery_instance_disk { /// A builder for [`RecoveryInstanceDisk`](crate::model::RecoveryInstanceDisk) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) internal_device_name: std::option::Option<std::string::String>, pub(crate) bytes: std::option::Option<i64>, pub(crate) ebs_volume_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The internal device name of this disk. This is the name that is visible on the machine itself and not from the EC2 console.</p> pub fn internal_device_name(mut self, input: impl Into<std::string::String>) -> Self { self.internal_device_name = Some(input.into()); self } /// <p>The internal device name of this disk. This is the name that is visible on the machine itself and not from the EC2 console.</p> pub fn set_internal_device_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.internal_device_name = input; self } /// <p>The amount of storage on the disk in bytes.</p> pub fn bytes(mut self, input: i64) -> Self { self.bytes = Some(input); self } /// <p>The amount of storage on the disk in bytes.</p> pub fn set_bytes(mut self, input: std::option::Option<i64>) -> Self { self.bytes = input; self } /// <p>The EBS Volume ID of this disk.</p> pub fn ebs_volume_id(mut self, input: impl Into<std::string::String>) -> Self { self.ebs_volume_id = Some(input.into()); self } /// <p>The EBS Volume ID of this disk.</p> pub fn set_ebs_volume_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.ebs_volume_id = input; self } /// Consumes the builder and constructs a [`RecoveryInstanceDisk`](crate::model::RecoveryInstanceDisk) pub fn build(self) -> crate::model::RecoveryInstanceDisk { crate::model::RecoveryInstanceDisk { internal_device_name: self.internal_device_name, bytes: self.bytes.unwrap_or_default(), ebs_volume_id: self.ebs_volume_id, } } } } impl RecoveryInstanceDisk { /// Creates a new builder-style object to manufacture [`RecoveryInstanceDisk`](crate::model::RecoveryInstanceDisk) pub fn builder() -> crate::model::recovery_instance_disk::Builder { crate::model::recovery_instance_disk::Builder::default() } } /// <p>Information about Data Replication</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstanceDataReplicationInfo { /// <p>Data replication lag duration.</p> pub lag_duration: std::option::Option<std::string::String>, /// <p>An estimate of when the data replication will be completed.</p> pub eta_date_time: std::option::Option<std::string::String>, /// <p>The disks that should be replicated.</p> pub 
replicated_disks: std::option::Option< std::vec::Vec<crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk>, >, /// <p>The state of the data replication.</p> pub data_replication_state: std::option::Option<crate::model::RecoveryInstanceDataReplicationState>, /// <p>Information about whether the data replication has been initiated.</p> pub data_replication_initiation: std::option::Option<crate::model::RecoveryInstanceDataReplicationInitiation>, /// <p>Information about Data Replication</p> pub data_replication_error: std::option::Option<crate::model::RecoveryInstanceDataReplicationError>, } impl RecoveryInstanceDataReplicationInfo { /// <p>Data replication lag duration.</p> pub fn lag_duration(&self) -> std::option::Option<&str> { self.lag_duration.as_deref() } /// <p>An estimate of when the data replication will be completed.</p> pub fn eta_date_time(&self) -> std::option::Option<&str> { self.eta_date_time.as_deref() } /// <p>The disks that should be replicated.</p> pub fn replicated_disks( &self, ) -> std::option::Option<&[crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk]> { self.replicated_disks.as_deref() } /// <p>The state of the data replication.</p> pub fn data_replication_state( &self, ) -> std::option::Option<&crate::model::RecoveryInstanceDataReplicationState> { self.data_replication_state.as_ref() } /// <p>Information about whether the data replication has been initiated.</p> pub fn data_replication_initiation( &self, ) -> std::option::Option<&crate::model::RecoveryInstanceDataReplicationInitiation> { self.data_replication_initiation.as_ref() } /// <p>Information about Data Replication</p> pub fn data_replication_error( &self, ) -> std::option::Option<&crate::model::RecoveryInstanceDataReplicationError> { self.data_replication_error.as_ref() } } impl std::fmt::Debug for RecoveryInstanceDataReplicationInfo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstanceDataReplicationInfo"); formatter.field("lag_duration", &self.lag_duration); formatter.field("eta_date_time", &self.eta_date_time); formatter.field("replicated_disks", &self.replicated_disks); formatter.field("data_replication_state", &self.data_replication_state); formatter.field( "data_replication_initiation", &self.data_replication_initiation, ); formatter.field("data_replication_error", &self.data_replication_error); formatter.finish() } } /// See [`RecoveryInstanceDataReplicationInfo`](crate::model::RecoveryInstanceDataReplicationInfo) pub mod recovery_instance_data_replication_info { /// A builder for [`RecoveryInstanceDataReplicationInfo`](crate::model::RecoveryInstanceDataReplicationInfo) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) lag_duration: std::option::Option<std::string::String>, pub(crate) eta_date_time: std::option::Option<std::string::String>, pub(crate) replicated_disks: std::option::Option< std::vec::Vec<crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk>, >, pub(crate) data_replication_state: std::option::Option<crate::model::RecoveryInstanceDataReplicationState>, pub(crate) data_replication_initiation: std::option::Option<crate::model::RecoveryInstanceDataReplicationInitiation>, pub(crate) data_replication_error: std::option::Option<crate::model::RecoveryInstanceDataReplicationError>, } impl Builder { /// <p>Data replication lag duration.</p> pub fn lag_duration(mut self, input: impl 
Into<std::string::String>) -> Self { self.lag_duration = Some(input.into()); self } /// <p>Data replication lag duration.</p> pub fn set_lag_duration(mut self, input: std::option::Option<std::string::String>) -> Self { self.lag_duration = input; self } /// <p>An estimate of when the data replication will be completed.</p> pub fn eta_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.eta_date_time = Some(input.into()); self } /// <p>An estimate of when the data replication will be completed.</p> pub fn set_eta_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.eta_date_time = input; self } /// Appends an item to `replicated_disks`. /// /// To override the contents of this collection use [`set_replicated_disks`](Self::set_replicated_disks). /// /// <p>The disks that should be replicated.</p> pub fn replicated_disks( mut self, input: crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk, ) -> Self { let mut v = self.replicated_disks.unwrap_or_default(); v.push(input); self.replicated_disks = Some(v); self } /// <p>The disks that should be replicated.</p> pub fn set_replicated_disks( mut self, input: std::option::Option< std::vec::Vec<crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk>, >, ) -> Self { self.replicated_disks = input; self } /// <p>The state of the data replication.</p> pub fn data_replication_state( mut self, input: crate::model::RecoveryInstanceDataReplicationState, ) -> Self { self.data_replication_state = Some(input); self } /// <p>The state of the data replication.</p> pub fn set_data_replication_state( mut self, input: std::option::Option<crate::model::RecoveryInstanceDataReplicationState>, ) -> Self { self.data_replication_state = input; self } /// <p>Information about whether the data replication has been initiated.</p> pub fn data_replication_initiation( mut self, input: crate::model::RecoveryInstanceDataReplicationInitiation, ) -> Self { self.data_replication_initiation = Some(input); self } /// <p>Information about whether the data replication has been initiated.</p> pub fn set_data_replication_initiation( mut self, input: std::option::Option<crate::model::RecoveryInstanceDataReplicationInitiation>, ) -> Self { self.data_replication_initiation = input; self } /// <p>Information about Data Replication</p> pub fn data_replication_error( mut self, input: crate::model::RecoveryInstanceDataReplicationError, ) -> Self { self.data_replication_error = Some(input); self } /// <p>Information about Data Replication</p> pub fn set_data_replication_error( mut self, input: std::option::Option<crate::model::RecoveryInstanceDataReplicationError>, ) -> Self { self.data_replication_error = input; self } /// Consumes the builder and constructs a [`RecoveryInstanceDataReplicationInfo`](crate::model::RecoveryInstanceDataReplicationInfo) pub fn build(self) -> crate::model::RecoveryInstanceDataReplicationInfo { crate::model::RecoveryInstanceDataReplicationInfo { lag_duration: self.lag_duration, eta_date_time: self.eta_date_time, replicated_disks: self.replicated_disks, data_replication_state: self.data_replication_state, data_replication_initiation: self.data_replication_initiation, data_replication_error: self.data_replication_error, } } } } impl RecoveryInstanceDataReplicationInfo { /// Creates a new builder-style object to manufacture [`RecoveryInstanceDataReplicationInfo`](crate::model::RecoveryInstanceDataReplicationInfo) pub fn builder() -> crate::model::recovery_instance_data_replication_info::Builder { 
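        // Delegates to Builder::default(); every field starts as None until its setter is called.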
crate::model::recovery_instance_data_replication_info::Builder::default() } } /// <p>Error in data replication.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstanceDataReplicationError { /// <p>Error in data replication.</p> pub error: std::option::Option<crate::model::FailbackReplicationError>, /// <p>Error in data replication.</p> pub raw_error: std::option::Option<std::string::String>, } impl RecoveryInstanceDataReplicationError { /// <p>Error in data replication.</p> pub fn error(&self) -> std::option::Option<&crate::model::FailbackReplicationError> { self.error.as_ref() } /// <p>Error in data replication.</p> pub fn raw_error(&self) -> std::option::Option<&str> { self.raw_error.as_deref() } } impl std::fmt::Debug for RecoveryInstanceDataReplicationError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstanceDataReplicationError"); formatter.field("error", &self.error); formatter.field("raw_error", &self.raw_error); formatter.finish() } } /// See [`RecoveryInstanceDataReplicationError`](crate::model::RecoveryInstanceDataReplicationError) pub mod recovery_instance_data_replication_error { /// A builder for [`RecoveryInstanceDataReplicationError`](crate::model::RecoveryInstanceDataReplicationError) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) error: std::option::Option<crate::model::FailbackReplicationError>, pub(crate) raw_error: std::option::Option<std::string::String>, } impl Builder { /// <p>Error in data replication.</p> pub fn error(mut self, input: crate::model::FailbackReplicationError) -> Self { self.error = Some(input); self } /// <p>Error in data replication.</p> pub fn set_error( mut self, input: std::option::Option<crate::model::FailbackReplicationError>, ) -> Self { self.error = input; self } /// <p>Error in data replication.</p> pub fn raw_error(mut self, input: impl Into<std::string::String>) -> Self { self.raw_error = Some(input.into()); self } /// <p>Error in data replication.</p> pub fn set_raw_error(mut self, input: std::option::Option<std::string::String>) -> Self { self.raw_error = input; self } /// Consumes the builder and constructs a [`RecoveryInstanceDataReplicationError`](crate::model::RecoveryInstanceDataReplicationError) pub fn build(self) -> crate::model::RecoveryInstanceDataReplicationError { crate::model::RecoveryInstanceDataReplicationError { error: self.error, raw_error: self.raw_error, } } } } impl RecoveryInstanceDataReplicationError { /// Creates a new builder-style object to manufacture [`RecoveryInstanceDataReplicationError`](crate::model::RecoveryInstanceDataReplicationError) pub fn builder() -> crate::model::recovery_instance_data_replication_error::Builder { crate::model::recovery_instance_data_replication_error::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum FailbackReplicationError { #[allow(missing_docs)] // documentation missing in model AgentNotSeen, #[allow(missing_docs)] // documentation missing in model FailbackClientNotSeen, #[allow(missing_docs)] // documentation missing in model FailedToConfigureReplicationSoftware, #[allow(missing_docs)] // documentation missing in model FailedToDownloadReplicationSoftwareToFailbackClient, 
#[allow(missing_docs)] // documentation missing in model FailedToEstablishAgentReplicatorSoftwareCommunication, #[allow(missing_docs)] // documentation missing in model FailedToEstablishRecoveryInstanceCommunication, #[allow(missing_docs)] // documentation missing in model FailedToPairAgentWithReplicationSoftware, #[allow(missing_docs)] // documentation missing in model NotConverging, #[allow(missing_docs)] // documentation missing in model UnstableNetwork, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for FailbackReplicationError { fn from(s: &str) -> Self { match s { "AGENT_NOT_SEEN" => FailbackReplicationError::AgentNotSeen, "FAILBACK_CLIENT_NOT_SEEN" => FailbackReplicationError::FailbackClientNotSeen, "FAILED_TO_CONFIGURE_REPLICATION_SOFTWARE" => { FailbackReplicationError::FailedToConfigureReplicationSoftware } "FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT" => { FailbackReplicationError::FailedToDownloadReplicationSoftwareToFailbackClient } "FAILED_TO_ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION" => { FailbackReplicationError::FailedToEstablishAgentReplicatorSoftwareCommunication } "FAILED_TO_ESTABLISH_RECOVERY_INSTANCE_COMMUNICATION" => { FailbackReplicationError::FailedToEstablishRecoveryInstanceCommunication } "FAILED_TO_PAIR_AGENT_WITH_REPLICATION_SOFTWARE" => { FailbackReplicationError::FailedToPairAgentWithReplicationSoftware } "NOT_CONVERGING" => FailbackReplicationError::NotConverging, "UNSTABLE_NETWORK" => FailbackReplicationError::UnstableNetwork, other => FailbackReplicationError::Unknown(other.to_owned()), } } } impl std::str::FromStr for FailbackReplicationError { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(FailbackReplicationError::from(s)) } } impl FailbackReplicationError { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { FailbackReplicationError::AgentNotSeen => "AGENT_NOT_SEEN", FailbackReplicationError::FailbackClientNotSeen => "FAILBACK_CLIENT_NOT_SEEN", FailbackReplicationError::FailedToConfigureReplicationSoftware => { "FAILED_TO_CONFIGURE_REPLICATION_SOFTWARE" } FailbackReplicationError::FailedToDownloadReplicationSoftwareToFailbackClient => { "FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT" } FailbackReplicationError::FailedToEstablishAgentReplicatorSoftwareCommunication => { "FAILED_TO_ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION" } FailbackReplicationError::FailedToEstablishRecoveryInstanceCommunication => { "FAILED_TO_ESTABLISH_RECOVERY_INSTANCE_COMMUNICATION" } FailbackReplicationError::FailedToPairAgentWithReplicationSoftware => { "FAILED_TO_PAIR_AGENT_WITH_REPLICATION_SOFTWARE" } FailbackReplicationError::NotConverging => "NOT_CONVERGING", FailbackReplicationError::UnstableNetwork => "UNSTABLE_NETWORK", FailbackReplicationError::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
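    ///
    /// A hedged round-trip sketch using the conversions defined above (the crate name
    /// `aws_sdk_drs` is an assumption):
    /// ```no_run
    /// use std::str::FromStr;
    /// use aws_sdk_drs::model::FailbackReplicationError;
    ///
    /// // `from_str` is infallible: known strings map to variants, anything else to `Unknown`.
    /// let err = FailbackReplicationError::from_str("UNSTABLE_NETWORK").unwrap();
    /// assert_eq!(err, FailbackReplicationError::UnstableNetwork);
    /// assert_eq!(err.as_str(), "UNSTABLE_NETWORK");
    /// assert!(FailbackReplicationError::values().contains(&"NOT_CONVERGING"));
    /// ```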
pub fn values() -> &'static [&'static str] { &[ "AGENT_NOT_SEEN", "FAILBACK_CLIENT_NOT_SEEN", "FAILED_TO_CONFIGURE_REPLICATION_SOFTWARE", "FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT", "FAILED_TO_ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION", "FAILED_TO_ESTABLISH_RECOVERY_INSTANCE_COMMUNICATION", "FAILED_TO_PAIR_AGENT_WITH_REPLICATION_SOFTWARE", "NOT_CONVERGING", "UNSTABLE_NETWORK", ] } } impl AsRef<str> for FailbackReplicationError { fn as_ref(&self) -> &str { self.as_str() } } /// <p>Data replication initiation.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstanceDataReplicationInitiation { /// <p>The date and time of the current attempt to initiate data replication.</p> pub start_date_time: std::option::Option<std::string::String>, /// <p>The steps of the current attempt to initiate data replication.</p> pub steps: std::option::Option< std::vec::Vec<crate::model::RecoveryInstanceDataReplicationInitiationStep>, >, } impl RecoveryInstanceDataReplicationInitiation { /// <p>The date and time of the current attempt to initiate data replication.</p> pub fn start_date_time(&self) -> std::option::Option<&str> { self.start_date_time.as_deref() } /// <p>The steps of the current attempt to initiate data replication.</p> pub fn steps( &self, ) -> std::option::Option<&[crate::model::RecoveryInstanceDataReplicationInitiationStep]> { self.steps.as_deref() } } impl std::fmt::Debug for RecoveryInstanceDataReplicationInitiation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstanceDataReplicationInitiation"); formatter.field("start_date_time", &self.start_date_time); formatter.field("steps", &self.steps); formatter.finish() } } /// See [`RecoveryInstanceDataReplicationInitiation`](crate::model::RecoveryInstanceDataReplicationInitiation) pub mod recovery_instance_data_replication_initiation { /// A builder for [`RecoveryInstanceDataReplicationInitiation`](crate::model::RecoveryInstanceDataReplicationInitiation) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) start_date_time: std::option::Option<std::string::String>, pub(crate) steps: std::option::Option< std::vec::Vec<crate::model::RecoveryInstanceDataReplicationInitiationStep>, >, } impl Builder { /// <p>The date and time of the current attempt to initiate data replication.</p> pub fn start_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.start_date_time = Some(input.into()); self } /// <p>The date and time of the current attempt to initiate data replication.</p> pub fn set_start_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.start_date_time = input; self } /// Appends an item to `steps`. /// /// To override the contents of this collection use [`set_steps`](Self::set_steps). 
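        ///
        /// A hedged sketch of appending one step; all types used are defined in this module,
        /// and the crate name `aws_sdk_drs` is an assumption:
        /// ```no_run
        /// use aws_sdk_drs::model::{
        ///     RecoveryInstanceDataReplicationInitiation,
        ///     RecoveryInstanceDataReplicationInitiationStep,
        ///     RecoveryInstanceDataReplicationInitiationStepName,
        ///     RecoveryInstanceDataReplicationInitiationStepStatus,
        /// };
        ///
        /// let step = RecoveryInstanceDataReplicationInitiationStep::builder()
        ///     .name(RecoveryInstanceDataReplicationInitiationStepName::CompleteVolumeMapping)
        ///     .status(RecoveryInstanceDataReplicationInitiationStepStatus::InProgress)
        ///     .build();
        /// // Each call to `steps` pushes onto the same Vec.
        /// let _initiation = RecoveryInstanceDataReplicationInitiation::builder()
        ///     .steps(step)
        ///     .build();
        /// ```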
/// /// <p>The steps of the current attempt to initiate data replication.</p> pub fn steps( mut self, input: crate::model::RecoveryInstanceDataReplicationInitiationStep, ) -> Self { let mut v = self.steps.unwrap_or_default(); v.push(input); self.steps = Some(v); self } /// <p>The steps of the current attempt to initiate data replication.</p> pub fn set_steps( mut self, input: std::option::Option< std::vec::Vec<crate::model::RecoveryInstanceDataReplicationInitiationStep>, >, ) -> Self { self.steps = input; self } /// Consumes the builder and constructs a [`RecoveryInstanceDataReplicationInitiation`](crate::model::RecoveryInstanceDataReplicationInitiation) pub fn build(self) -> crate::model::RecoveryInstanceDataReplicationInitiation { crate::model::RecoveryInstanceDataReplicationInitiation { start_date_time: self.start_date_time, steps: self.steps, } } } } impl RecoveryInstanceDataReplicationInitiation { /// Creates a new builder-style object to manufacture [`RecoveryInstanceDataReplicationInitiation`](crate::model::RecoveryInstanceDataReplicationInitiation) pub fn builder() -> crate::model::recovery_instance_data_replication_initiation::Builder { crate::model::recovery_instance_data_replication_initiation::Builder::default() } } /// <p>Data replication initiation step.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstanceDataReplicationInitiationStep { /// <p>The name of the step.</p> pub name: std::option::Option<crate::model::RecoveryInstanceDataReplicationInitiationStepName>, /// <p>The status of the step.</p> pub status: std::option::Option<crate::model::RecoveryInstanceDataReplicationInitiationStepStatus>, } impl RecoveryInstanceDataReplicationInitiationStep { /// <p>The name of the step.</p> pub fn name( &self, ) -> std::option::Option<&crate::model::RecoveryInstanceDataReplicationInitiationStepName> { self.name.as_ref() } /// <p>The status of the step.</p> pub fn status( &self, ) -> std::option::Option<&crate::model::RecoveryInstanceDataReplicationInitiationStepStatus> { self.status.as_ref() } } impl std::fmt::Debug for RecoveryInstanceDataReplicationInitiationStep { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstanceDataReplicationInitiationStep"); formatter.field("name", &self.name); formatter.field("status", &self.status); formatter.finish() } } /// See [`RecoveryInstanceDataReplicationInitiationStep`](crate::model::RecoveryInstanceDataReplicationInitiationStep) pub mod recovery_instance_data_replication_initiation_step { /// A builder for [`RecoveryInstanceDataReplicationInitiationStep`](crate::model::RecoveryInstanceDataReplicationInitiationStep) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<crate::model::RecoveryInstanceDataReplicationInitiationStepName>, pub(crate) status: std::option::Option<crate::model::RecoveryInstanceDataReplicationInitiationStepStatus>, } impl Builder { /// <p>The name of the step.</p> pub fn name( mut self, input: crate::model::RecoveryInstanceDataReplicationInitiationStepName, ) -> Self { self.name = Some(input); self } /// <p>The name of the step.</p> pub fn set_name( mut self, input: std::option::Option< crate::model::RecoveryInstanceDataReplicationInitiationStepName, >, ) -> Self { self.name = input; self } /// <p>The status of the step.</p> pub fn status( mut self, input: 
crate::model::RecoveryInstanceDataReplicationInitiationStepStatus, ) -> Self { self.status = Some(input); self } /// <p>The status of the step.</p> pub fn set_status( mut self, input: std::option::Option< crate::model::RecoveryInstanceDataReplicationInitiationStepStatus, >, ) -> Self { self.status = input; self } /// Consumes the builder and constructs a [`RecoveryInstanceDataReplicationInitiationStep`](crate::model::RecoveryInstanceDataReplicationInitiationStep) pub fn build(self) -> crate::model::RecoveryInstanceDataReplicationInitiationStep { crate::model::RecoveryInstanceDataReplicationInitiationStep { name: self.name, status: self.status, } } } } impl RecoveryInstanceDataReplicationInitiationStep { /// Creates a new builder-style object to manufacture [`RecoveryInstanceDataReplicationInitiationStep`](crate::model::RecoveryInstanceDataReplicationInitiationStep) pub fn builder() -> crate::model::recovery_instance_data_replication_initiation_step::Builder { crate::model::recovery_instance_data_replication_initiation_step::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum RecoveryInstanceDataReplicationInitiationStepStatus { #[allow(missing_docs)] // documentation missing in model Failed, #[allow(missing_docs)] // documentation missing in model InProgress, #[allow(missing_docs)] // documentation missing in model NotStarted, #[allow(missing_docs)] // documentation missing in model Skipped, #[allow(missing_docs)] // documentation missing in model Succeeded, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for RecoveryInstanceDataReplicationInitiationStepStatus { fn from(s: &str) -> Self { match s { "FAILED" => RecoveryInstanceDataReplicationInitiationStepStatus::Failed, "IN_PROGRESS" => RecoveryInstanceDataReplicationInitiationStepStatus::InProgress, "NOT_STARTED" => RecoveryInstanceDataReplicationInitiationStepStatus::NotStarted, "SKIPPED" => RecoveryInstanceDataReplicationInitiationStepStatus::Skipped, "SUCCEEDED" => RecoveryInstanceDataReplicationInitiationStepStatus::Succeeded, other => RecoveryInstanceDataReplicationInitiationStepStatus::Unknown(other.to_owned()), } } } impl std::str::FromStr for RecoveryInstanceDataReplicationInitiationStepStatus { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(RecoveryInstanceDataReplicationInitiationStepStatus::from(s)) } } impl RecoveryInstanceDataReplicationInitiationStepStatus { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { RecoveryInstanceDataReplicationInitiationStepStatus::Failed => "FAILED", RecoveryInstanceDataReplicationInitiationStepStatus::InProgress => "IN_PROGRESS", RecoveryInstanceDataReplicationInitiationStepStatus::NotStarted => "NOT_STARTED", RecoveryInstanceDataReplicationInitiationStepStatus::Skipped => "SKIPPED", RecoveryInstanceDataReplicationInitiationStepStatus::Succeeded => "SUCCEEDED", RecoveryInstanceDataReplicationInitiationStepStatus::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
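    ///
    /// For example (a hedged sketch; the crate name `aws_sdk_drs` is an assumption):
    /// ```no_run
    /// use aws_sdk_drs::model::RecoveryInstanceDataReplicationInitiationStepStatus as StepStatus;
    ///
    /// // Every known status string round-trips through `from` and `as_str`.
    /// for s in StepStatus::values() {
    ///     assert_eq!(StepStatus::from(*s).as_str(), *s);
    /// }
    /// ```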
pub fn values() -> &'static [&'static str] { &[ "FAILED", "IN_PROGRESS", "NOT_STARTED", "SKIPPED", "SUCCEEDED", ] } } impl AsRef<str> for RecoveryInstanceDataReplicationInitiationStepStatus { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum RecoveryInstanceDataReplicationInitiationStepName { #[allow(missing_docs)] // documentation missing in model CompleteVolumeMapping, #[allow(missing_docs)] // documentation missing in model ConfigureReplicationSoftware, #[allow(missing_docs)] // documentation missing in model DownloadReplicationSoftwareToFailbackClient, #[allow(missing_docs)] // documentation missing in model EstablishAgentReplicatorSoftwareCommunication, #[allow(missing_docs)] // documentation missing in model EstablishRecoveryInstanceCommunication, #[allow(missing_docs)] // documentation missing in model LinkFailbackClientWithRecoveryInstance, #[allow(missing_docs)] // documentation missing in model PairAgentWithReplicationSoftware, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for RecoveryInstanceDataReplicationInitiationStepName { fn from(s: &str) -> Self { match s { "COMPLETE_VOLUME_MAPPING" => RecoveryInstanceDataReplicationInitiationStepName::CompleteVolumeMapping, "CONFIGURE_REPLICATION_SOFTWARE" => RecoveryInstanceDataReplicationInitiationStepName::ConfigureReplicationSoftware, "DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT" => RecoveryInstanceDataReplicationInitiationStepName::DownloadReplicationSoftwareToFailbackClient, "ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION" => RecoveryInstanceDataReplicationInitiationStepName::EstablishAgentReplicatorSoftwareCommunication, "ESTABLISH_RECOVERY_INSTANCE_COMMUNICATION" => RecoveryInstanceDataReplicationInitiationStepName::EstablishRecoveryInstanceCommunication, "LINK_FAILBACK_CLIENT_WITH_RECOVERY_INSTANCE" => RecoveryInstanceDataReplicationInitiationStepName::LinkFailbackClientWithRecoveryInstance, "PAIR_AGENT_WITH_REPLICATION_SOFTWARE" => RecoveryInstanceDataReplicationInitiationStepName::PairAgentWithReplicationSoftware, other => RecoveryInstanceDataReplicationInitiationStepName::Unknown(other.to_owned()) } } } impl std::str::FromStr for RecoveryInstanceDataReplicationInitiationStepName { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(RecoveryInstanceDataReplicationInitiationStepName::from(s)) } } impl RecoveryInstanceDataReplicationInitiationStepName { /// Returns the `&str` value of the enum member. 
pub fn as_str(&self) -> &str { match self { RecoveryInstanceDataReplicationInitiationStepName::CompleteVolumeMapping => "COMPLETE_VOLUME_MAPPING", RecoveryInstanceDataReplicationInitiationStepName::ConfigureReplicationSoftware => "CONFIGURE_REPLICATION_SOFTWARE", RecoveryInstanceDataReplicationInitiationStepName::DownloadReplicationSoftwareToFailbackClient => "DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT", RecoveryInstanceDataReplicationInitiationStepName::EstablishAgentReplicatorSoftwareCommunication => "ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION", RecoveryInstanceDataReplicationInitiationStepName::EstablishRecoveryInstanceCommunication => "ESTABLISH_RECOVERY_INSTANCE_COMMUNICATION", RecoveryInstanceDataReplicationInitiationStepName::LinkFailbackClientWithRecoveryInstance => "LINK_FAILBACK_CLIENT_WITH_RECOVERY_INSTANCE", RecoveryInstanceDataReplicationInitiationStepName::PairAgentWithReplicationSoftware => "PAIR_AGENT_WITH_REPLICATION_SOFTWARE", RecoveryInstanceDataReplicationInitiationStepName::Unknown(s) => s.as_ref() } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &[ "COMPLETE_VOLUME_MAPPING", "CONFIGURE_REPLICATION_SOFTWARE", "DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT", "ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION", "ESTABLISH_RECOVERY_INSTANCE_COMMUNICATION", "LINK_FAILBACK_CLIENT_WITH_RECOVERY_INSTANCE", "PAIR_AGENT_WITH_REPLICATION_SOFTWARE", ] } } impl AsRef<str> for RecoveryInstanceDataReplicationInitiationStepName { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum RecoveryInstanceDataReplicationState { #[allow(missing_docs)] // documentation missing in model Backlog, #[allow(missing_docs)] // documentation missing in model Continuous, #[allow(missing_docs)] // documentation missing in model CreatingSnapshot, #[allow(missing_docs)] // documentation missing in model Disconnected, #[allow(missing_docs)] // documentation missing in model InitialSync, #[allow(missing_docs)] // documentation missing in model Initiating, #[allow(missing_docs)] // documentation missing in model Paused, #[allow(missing_docs)] // documentation missing in model Rescan, #[allow(missing_docs)] // documentation missing in model Stalled, #[allow(missing_docs)] // documentation missing in model Stopped, /// Unknown contains new variants that have been added since this code was generated. 
Unknown(String), } impl std::convert::From<&str> for RecoveryInstanceDataReplicationState { fn from(s: &str) -> Self { match s { "BACKLOG" => RecoveryInstanceDataReplicationState::Backlog, "CONTINUOUS" => RecoveryInstanceDataReplicationState::Continuous, "CREATING_SNAPSHOT" => RecoveryInstanceDataReplicationState::CreatingSnapshot, "DISCONNECTED" => RecoveryInstanceDataReplicationState::Disconnected, "INITIAL_SYNC" => RecoveryInstanceDataReplicationState::InitialSync, "INITIATING" => RecoveryInstanceDataReplicationState::Initiating, "PAUSED" => RecoveryInstanceDataReplicationState::Paused, "RESCAN" => RecoveryInstanceDataReplicationState::Rescan, "STALLED" => RecoveryInstanceDataReplicationState::Stalled, "STOPPED" => RecoveryInstanceDataReplicationState::Stopped, other => RecoveryInstanceDataReplicationState::Unknown(other.to_owned()), } } } impl std::str::FromStr for RecoveryInstanceDataReplicationState { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(RecoveryInstanceDataReplicationState::from(s)) } } impl RecoveryInstanceDataReplicationState { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { RecoveryInstanceDataReplicationState::Backlog => "BACKLOG", RecoveryInstanceDataReplicationState::Continuous => "CONTINUOUS", RecoveryInstanceDataReplicationState::CreatingSnapshot => "CREATING_SNAPSHOT", RecoveryInstanceDataReplicationState::Disconnected => "DISCONNECTED", RecoveryInstanceDataReplicationState::InitialSync => "INITIAL_SYNC", RecoveryInstanceDataReplicationState::Initiating => "INITIATING", RecoveryInstanceDataReplicationState::Paused => "PAUSED", RecoveryInstanceDataReplicationState::Rescan => "RESCAN", RecoveryInstanceDataReplicationState::Stalled => "STALLED", RecoveryInstanceDataReplicationState::Stopped => "STOPPED", RecoveryInstanceDataReplicationState::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
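    ///
    /// A hedged sketch of the forward-compatibility behavior (crate name assumed to be
    /// `aws_sdk_drs`):
    /// ```no_run
    /// use aws_sdk_drs::model::RecoveryInstanceDataReplicationState;
    ///
    /// // Strings added to the service after code generation surface as `Unknown`,
    /// // so match non-exhaustively rather than assuming the list here is complete.
    /// let state = RecoveryInstanceDataReplicationState::from("SOME_FUTURE_STATE");
    /// assert_eq!(
    ///     state,
    ///     RecoveryInstanceDataReplicationState::Unknown("SOME_FUTURE_STATE".to_owned())
    /// );
    /// assert_eq!(state.as_str(), "SOME_FUTURE_STATE");
    /// ```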
pub fn values() -> &'static [&'static str] { &[ "BACKLOG", "CONTINUOUS", "CREATING_SNAPSHOT", "DISCONNECTED", "INITIAL_SYNC", "INITIATING", "PAUSED", "RESCAN", "STALLED", "STOPPED", ] } } impl AsRef<str> for RecoveryInstanceDataReplicationState { fn as_ref(&self) -> &str { self.as_str() } } /// <p>A disk that should be replicated.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstanceDataReplicationInfoReplicatedDisk { /// <p>The name of the device.</p> pub device_name: std::option::Option<std::string::String>, /// <p>The total amount of data to be replicated in bytes.</p> pub total_storage_bytes: i64, /// <p>The amount of data replicated so far in bytes.</p> pub replicated_storage_bytes: i64, /// <p>The amount of data to be rescanned in bytes.</p> pub rescanned_storage_bytes: i64, /// <p>The size of the replication backlog in bytes.</p> pub backlogged_storage_bytes: i64, } impl RecoveryInstanceDataReplicationInfoReplicatedDisk { /// <p>The name of the device.</p> pub fn device_name(&self) -> std::option::Option<&str> { self.device_name.as_deref() } /// <p>The total amount of data to be replicated in bytes.</p> pub fn total_storage_bytes(&self) -> i64 { self.total_storage_bytes } /// <p>The amount of data replicated so far in bytes.</p> pub fn replicated_storage_bytes(&self) -> i64 { self.replicated_storage_bytes } /// <p>The amount of data to be rescanned in bytes.</p> pub fn rescanned_storage_bytes(&self) -> i64 { self.rescanned_storage_bytes } /// <p>The size of the replication backlog in bytes.</p> pub fn backlogged_storage_bytes(&self) -> i64 { self.backlogged_storage_bytes } } impl std::fmt::Debug for RecoveryInstanceDataReplicationInfoReplicatedDisk { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstanceDataReplicationInfoReplicatedDisk"); formatter.field("device_name", &self.device_name); formatter.field("total_storage_bytes", &self.total_storage_bytes); formatter.field("replicated_storage_bytes", &self.replicated_storage_bytes); formatter.field("rescanned_storage_bytes", &self.rescanned_storage_bytes); formatter.field("backlogged_storage_bytes", &self.backlogged_storage_bytes); formatter.finish() } } /// See [`RecoveryInstanceDataReplicationInfoReplicatedDisk`](crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk) pub mod recovery_instance_data_replication_info_replicated_disk { /// A builder for [`RecoveryInstanceDataReplicationInfoReplicatedDisk`](crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) device_name: std::option::Option<std::string::String>, pub(crate) total_storage_bytes: std::option::Option<i64>, pub(crate) replicated_storage_bytes: std::option::Option<i64>, pub(crate) rescanned_storage_bytes: std::option::Option<i64>, pub(crate) backlogged_storage_bytes: std::option::Option<i64>, } impl Builder { /// <p>The name of the device.</p> pub fn device_name(mut self, input: impl Into<std::string::String>) -> Self { self.device_name = Some(input.into()); self } /// <p>The name of the device.</p> pub fn set_device_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.device_name = input; self } /// <p>The total amount of data to be replicated in bytes.</p> pub fn total_storage_bytes(mut self, input: i64) -> Self { self.total_storage_bytes = Some(input); self } /// <p>The total 
amount of data to be replicated in bytes.</p> pub fn set_total_storage_bytes(mut self, input: std::option::Option<i64>) -> Self { self.total_storage_bytes = input; self } /// <p>The amount of data replicated so far in bytes.</p> pub fn replicated_storage_bytes(mut self, input: i64) -> Self { self.replicated_storage_bytes = Some(input); self } /// <p>The amount of data replicated so far in bytes.</p> pub fn set_replicated_storage_bytes(mut self, input: std::option::Option<i64>) -> Self { self.replicated_storage_bytes = input; self } /// <p>The amount of data to be rescanned in bytes.</p> pub fn rescanned_storage_bytes(mut self, input: i64) -> Self { self.rescanned_storage_bytes = Some(input); self } /// <p>The amount of data to be rescanned in bytes.</p> pub fn set_rescanned_storage_bytes(mut self, input: std::option::Option<i64>) -> Self { self.rescanned_storage_bytes = input; self } /// <p>The size of the replication backlog in bytes.</p> pub fn backlogged_storage_bytes(mut self, input: i64) -> Self { self.backlogged_storage_bytes = Some(input); self } /// <p>The size of the replication backlog in bytes.</p> pub fn set_backlogged_storage_bytes(mut self, input: std::option::Option<i64>) -> Self { self.backlogged_storage_bytes = input; self } /// Consumes the builder and constructs a [`RecoveryInstanceDataReplicationInfoReplicatedDisk`](crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk) pub fn build(self) -> crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk { crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk { device_name: self.device_name, total_storage_bytes: self.total_storage_bytes.unwrap_or_default(), replicated_storage_bytes: self.replicated_storage_bytes.unwrap_or_default(), rescanned_storage_bytes: self.rescanned_storage_bytes.unwrap_or_default(), backlogged_storage_bytes: self.backlogged_storage_bytes.unwrap_or_default(), } } } } impl RecoveryInstanceDataReplicationInfoReplicatedDisk { /// Creates a new builder-style object to manufacture [`RecoveryInstanceDataReplicationInfoReplicatedDisk`](crate::model::RecoveryInstanceDataReplicationInfoReplicatedDisk) pub fn builder( ) -> crate::model::recovery_instance_data_replication_info_replicated_disk::Builder { crate::model::recovery_instance_data_replication_info_replicated_disk::Builder::default() } } /// <p>An object representing failback related information of the Recovery Instance.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RecoveryInstanceFailback { /// <p>The ID of the failback client that this Recovery Instance is associated with.</p> pub failback_client_id: std::option::Option<std::string::String>, /// <p>The Job ID of the last failback log for this Recovery Instance.</p> pub failback_job_id: std::option::Option<std::string::String>, /// <p>The date and time that the failback initiation started.</p> pub failback_initiation_time: std::option::Option<std::string::String>, /// <p>The state of the failback process that this Recovery Instance is in.</p> pub state: std::option::Option<crate::model::FailbackState>, /// <p>The date and time the agent on the Recovery Instance was last seen by the service.</p> pub agent_last_seen_by_service_date_time: std::option::Option<std::string::String>, /// <p>The date and time that the failback client was last seen by the service.</p> pub failback_client_last_seen_by_service_date_time: std::option::Option<std::string::String>, /// <p>Whether we are failing back to the original Source Server for this Recovery 
Instance.</p> pub failback_to_original_server: std::option::Option<bool>, /// <p>The date and time of the first byte that was replicated from the Recovery Instance.</p> pub first_byte_date_time: std::option::Option<std::string::String>, /// <p>The amount of time that the Recovery Instance has been replicating for.</p> pub elapsed_replication_duration: std::option::Option<std::string::String>, } impl RecoveryInstanceFailback { /// <p>The ID of the failback client that this Recovery Instance is associated with.</p> pub fn failback_client_id(&self) -> std::option::Option<&str> { self.failback_client_id.as_deref() } /// <p>The Job ID of the last failback log for this Recovery Instance.</p> pub fn failback_job_id(&self) -> std::option::Option<&str> { self.failback_job_id.as_deref() } /// <p>The date and time that the failback initiation started.</p> pub fn failback_initiation_time(&self) -> std::option::Option<&str> { self.failback_initiation_time.as_deref() } /// <p>The state of the failback process that this Recovery Instance is in.</p> pub fn state(&self) -> std::option::Option<&crate::model::FailbackState> { self.state.as_ref() } /// <p>The date and time the agent on the Recovery Instance was last seen by the service.</p> pub fn agent_last_seen_by_service_date_time(&self) -> std::option::Option<&str> { self.agent_last_seen_by_service_date_time.as_deref() } /// <p>The date and time that the failback client was last seen by the service.</p> pub fn failback_client_last_seen_by_service_date_time(&self) -> std::option::Option<&str> { self.failback_client_last_seen_by_service_date_time .as_deref() } /// <p>Whether we are failing back to the original Source Server for this Recovery Instance.</p> pub fn failback_to_original_server(&self) -> std::option::Option<bool> { self.failback_to_original_server } /// <p>The date and time of the first byte that was replicated from the Recovery Instance.</p> pub fn first_byte_date_time(&self) -> std::option::Option<&str> { self.first_byte_date_time.as_deref() } /// <p>The amount of time that the Recovery Instance has been replicating for.</p> pub fn elapsed_replication_duration(&self) -> std::option::Option<&str> { self.elapsed_replication_duration.as_deref() } } impl std::fmt::Debug for RecoveryInstanceFailback { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RecoveryInstanceFailback"); formatter.field("failback_client_id", &self.failback_client_id); formatter.field("failback_job_id", &self.failback_job_id); formatter.field("failback_initiation_time", &self.failback_initiation_time); formatter.field("state", &self.state); formatter.field( "agent_last_seen_by_service_date_time", &self.agent_last_seen_by_service_date_time, ); formatter.field( "failback_client_last_seen_by_service_date_time", &self.failback_client_last_seen_by_service_date_time, ); formatter.field( "failback_to_original_server", &self.failback_to_original_server, ); formatter.field("first_byte_date_time", &self.first_byte_date_time); formatter.field( "elapsed_replication_duration", &self.elapsed_replication_duration, ); formatter.finish() } } /// See [`RecoveryInstanceFailback`](crate::model::RecoveryInstanceFailback) pub mod recovery_instance_failback { /// A builder for [`RecoveryInstanceFailback`](crate::model::RecoveryInstanceFailback) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) failback_client_id: std::option::Option<std::string::String>, 
pub(crate) failback_job_id: std::option::Option<std::string::String>, pub(crate) failback_initiation_time: std::option::Option<std::string::String>, pub(crate) state: std::option::Option<crate::model::FailbackState>, pub(crate) agent_last_seen_by_service_date_time: std::option::Option<std::string::String>, pub(crate) failback_client_last_seen_by_service_date_time: std::option::Option<std::string::String>, pub(crate) failback_to_original_server: std::option::Option<bool>, pub(crate) first_byte_date_time: std::option::Option<std::string::String>, pub(crate) elapsed_replication_duration: std::option::Option<std::string::String>, } impl Builder { /// <p>The ID of the failback client that this Recovery Instance is associated with.</p> pub fn failback_client_id(mut self, input: impl Into<std::string::String>) -> Self { self.failback_client_id = Some(input.into()); self } /// <p>The ID of the failback client that this Recovery Instance is associated with.</p> pub fn set_failback_client_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.failback_client_id = input; self } /// <p>The Job ID of the last failback log for this Recovery Instance.</p> pub fn failback_job_id(mut self, input: impl Into<std::string::String>) -> Self { self.failback_job_id = Some(input.into()); self } /// <p>The Job ID of the last failback log for this Recovery Instance.</p> pub fn set_failback_job_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.failback_job_id = input; self } /// <p>The date and time that the failback initiation started.</p> pub fn failback_initiation_time(mut self, input: impl Into<std::string::String>) -> Self { self.failback_initiation_time = Some(input.into()); self } /// <p>The date and time that the failback initiation started.</p> pub fn set_failback_initiation_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.failback_initiation_time = input; self } /// <p>The state of the failback process that this Recovery Instance is in.</p> pub fn state(mut self, input: crate::model::FailbackState) -> Self { self.state = Some(input); self } /// <p>The state of the failback process that this Recovery Instance is in.</p> pub fn set_state( mut self, input: std::option::Option<crate::model::FailbackState>, ) -> Self { self.state = input; self } /// <p>The date and time the agent on the Recovery Instance was last seen by the service.</p> pub fn agent_last_seen_by_service_date_time( mut self, input: impl Into<std::string::String>, ) -> Self { self.agent_last_seen_by_service_date_time = Some(input.into()); self } /// <p>The date and time the agent on the Recovery Instance was last seen by the service.</p> pub fn set_agent_last_seen_by_service_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.agent_last_seen_by_service_date_time = input; self } /// <p>The date and time that the failback client was last seen by the service.</p> pub fn failback_client_last_seen_by_service_date_time( mut self, input: impl Into<std::string::String>, ) -> Self { self.failback_client_last_seen_by_service_date_time = Some(input.into()); self } /// <p>The date and time that the failback client was last seen by the service.</p> pub fn set_failback_client_last_seen_by_service_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.failback_client_last_seen_by_service_date_time = input; self } /// <p>Whether we are failing back to the original Source Server for this Recovery Instance.</p> 
pub fn failback_to_original_server(mut self, input: bool) -> Self { self.failback_to_original_server = Some(input); self } /// <p>Whether we are failing back to the original Source Server for this Recovery Instance.</p> pub fn set_failback_to_original_server(mut self, input: std::option::Option<bool>) -> Self { self.failback_to_original_server = input; self } /// <p>The date and time of the first byte that was replicated from the Recovery Instance.</p> pub fn first_byte_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.first_byte_date_time = Some(input.into()); self } /// <p>The date and time of the first byte that was replicated from the Recovery Instance.</p> pub fn set_first_byte_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.first_byte_date_time = input; self } /// <p>The amount of time that the Recovery Instance has been replicating for.</p> pub fn elapsed_replication_duration( mut self, input: impl Into<std::string::String>, ) -> Self { self.elapsed_replication_duration = Some(input.into()); self } /// <p>The amount of time that the Recovery Instance has been replicating for.</p> pub fn set_elapsed_replication_duration( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.elapsed_replication_duration = input; self } /// Consumes the builder and constructs a [`RecoveryInstanceFailback`](crate::model::RecoveryInstanceFailback) pub fn build(self) -> crate::model::RecoveryInstanceFailback { crate::model::RecoveryInstanceFailback { failback_client_id: self.failback_client_id, failback_job_id: self.failback_job_id, failback_initiation_time: self.failback_initiation_time, state: self.state, agent_last_seen_by_service_date_time: self.agent_last_seen_by_service_date_time, failback_client_last_seen_by_service_date_time: self .failback_client_last_seen_by_service_date_time, failback_to_original_server: self.failback_to_original_server, first_byte_date_time: self.first_byte_date_time, elapsed_replication_duration: self.elapsed_replication_duration, } } } } impl RecoveryInstanceFailback { /// Creates a new builder-style object to manufacture [`RecoveryInstanceFailback`](crate::model::RecoveryInstanceFailback) pub fn builder() -> crate::model::recovery_instance_failback::Builder { crate::model::recovery_instance_failback::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum FailbackState { #[allow(missing_docs)] // documentation missing in model FailbackCompleted, #[allow(missing_docs)] // documentation missing in model FailbackError, #[allow(missing_docs)] // documentation missing in model FailbackInProgress, #[allow(missing_docs)] // documentation missing in model FailbackNotStarted, #[allow(missing_docs)] // documentation missing in model FailbackReadyForLaunch, /// Unknown contains new variants that have been added since this code was generated. 
Unknown(String), } impl std::convert::From<&str> for FailbackState { fn from(s: &str) -> Self { match s { "FAILBACK_COMPLETED" => FailbackState::FailbackCompleted, "FAILBACK_ERROR" => FailbackState::FailbackError, "FAILBACK_IN_PROGRESS" => FailbackState::FailbackInProgress, "FAILBACK_NOT_STARTED" => FailbackState::FailbackNotStarted, "FAILBACK_READY_FOR_LAUNCH" => FailbackState::FailbackReadyForLaunch, other => FailbackState::Unknown(other.to_owned()), } } } impl std::str::FromStr for FailbackState { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(FailbackState::from(s)) } } impl FailbackState { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { FailbackState::FailbackCompleted => "FAILBACK_COMPLETED", FailbackState::FailbackError => "FAILBACK_ERROR", FailbackState::FailbackInProgress => "FAILBACK_IN_PROGRESS", FailbackState::FailbackNotStarted => "FAILBACK_NOT_STARTED", FailbackState::FailbackReadyForLaunch => "FAILBACK_READY_FOR_LAUNCH", FailbackState::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &[ "FAILBACK_COMPLETED", "FAILBACK_ERROR", "FAILBACK_IN_PROGRESS", "FAILBACK_NOT_STARTED", "FAILBACK_READY_FOR_LAUNCH", ] } } impl AsRef<str> for FailbackState { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum Ec2InstanceState { #[allow(missing_docs)] // documentation missing in model NotFound, #[allow(missing_docs)] // documentation missing in model Pending, #[allow(missing_docs)] // documentation missing in model Running, #[allow(missing_docs)] // documentation missing in model ShuttingDown, #[allow(missing_docs)] // documentation missing in model Stopped, #[allow(missing_docs)] // documentation missing in model Stopping, #[allow(missing_docs)] // documentation missing in model Terminated, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for Ec2InstanceState { fn from(s: &str) -> Self { match s { "NOT_FOUND" => Ec2InstanceState::NotFound, "PENDING" => Ec2InstanceState::Pending, "RUNNING" => Ec2InstanceState::Running, "SHUTTING-DOWN" => Ec2InstanceState::ShuttingDown, "STOPPED" => Ec2InstanceState::Stopped, "STOPPING" => Ec2InstanceState::Stopping, "TERMINATED" => Ec2InstanceState::Terminated, other => Ec2InstanceState::Unknown(other.to_owned()), } } } impl std::str::FromStr for Ec2InstanceState { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(Ec2InstanceState::from(s)) } } impl Ec2InstanceState { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { Ec2InstanceState::NotFound => "NOT_FOUND", Ec2InstanceState::Pending => "PENDING", Ec2InstanceState::Running => "RUNNING", Ec2InstanceState::ShuttingDown => "SHUTTING-DOWN", Ec2InstanceState::Stopped => "STOPPED", Ec2InstanceState::Stopping => "STOPPING", Ec2InstanceState::Terminated => "TERMINATED", Ec2InstanceState::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
pub fn values() -> &'static [&'static str] { &[ "NOT_FOUND", "PENDING", "RUNNING", "SHUTTING-DOWN", "STOPPED", "STOPPING", "TERMINATED", ] } } impl AsRef<str> for Ec2InstanceState { fn as_ref(&self) -> &str { self.as_str() } } /// <p>A set of filters by which to return Recovery Instances.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeRecoveryInstancesRequestFilters { /// <p>An array of Recovery Instance IDs that should be returned. An empty array means all Recovery Instances.</p> pub recovery_instance_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>An array of Source Server IDs for which associated Recovery Instances should be returned.</p> pub source_server_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, } impl DescribeRecoveryInstancesRequestFilters { /// <p>An array of Recovery Instance IDs that should be returned. An empty array means all Recovery Instances.</p> pub fn recovery_instance_i_ds(&self) -> std::option::Option<&[std::string::String]> { self.recovery_instance_i_ds.as_deref() } /// <p>An array of Source Server IDs for which associated Recovery Instances should be returned.</p> pub fn source_server_i_ds(&self) -> std::option::Option<&[std::string::String]> { self.source_server_i_ds.as_deref() } } impl std::fmt::Debug for DescribeRecoveryInstancesRequestFilters { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeRecoveryInstancesRequestFilters"); formatter.field("recovery_instance_i_ds", &self.recovery_instance_i_ds); formatter.field("source_server_i_ds", &self.source_server_i_ds); formatter.finish() } } /// See [`DescribeRecoveryInstancesRequestFilters`](crate::model::DescribeRecoveryInstancesRequestFilters) pub mod describe_recovery_instances_request_filters { /// A builder for [`DescribeRecoveryInstancesRequestFilters`](crate::model::DescribeRecoveryInstancesRequestFilters) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) recovery_instance_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) source_server_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, } impl Builder { /// Appends an item to `recovery_instance_i_ds`. /// /// To override the contents of this collection use [`set_recovery_instance_i_ds`](Self::set_recovery_instance_i_ds). /// /// <p>An array of Recovery Instance IDs that should be returned. An empty array means all Recovery Instances.</p> pub fn recovery_instance_i_ds(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.recovery_instance_i_ds.unwrap_or_default(); v.push(input.into()); self.recovery_instance_i_ds = Some(v); self } /// <p>An array of Recovery Instance IDs that should be returned. An empty array means all Recovery Instances.</p> pub fn set_recovery_instance_i_ds( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.recovery_instance_i_ds = input; self } /// Appends an item to `source_server_i_ds`. /// /// To override the contents of this collection use [`set_source_server_i_ds`](Self::set_source_server_i_ds). 
/// /// <p>An array of Source Server IDs for which associated Recovery Instances should be returned.</p> pub fn source_server_i_ds(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.source_server_i_ds.unwrap_or_default(); v.push(input.into()); self.source_server_i_ds = Some(v); self } /// <p>An array of Source Server IDs for which associated Recovery Instances should be returned.</p> pub fn set_source_server_i_ds( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.source_server_i_ds = input; self } /// Consumes the builder and constructs a [`DescribeRecoveryInstancesRequestFilters`](crate::model::DescribeRecoveryInstancesRequestFilters) pub fn build(self) -> crate::model::DescribeRecoveryInstancesRequestFilters { crate::model::DescribeRecoveryInstancesRequestFilters { recovery_instance_i_ds: self.recovery_instance_i_ds, source_server_i_ds: self.source_server_i_ds, } } } } impl DescribeRecoveryInstancesRequestFilters { /// Creates a new builder-style object to manufacture [`DescribeRecoveryInstancesRequestFilters`](crate::model::DescribeRecoveryInstancesRequestFilters) pub fn builder() -> crate::model::describe_recovery_instances_request_filters::Builder { crate::model::describe_recovery_instances_request_filters::Builder::default() } } /// <p>A log outputted by a Job.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct JobLog { /// <p>The date and time the log was taken.</p> pub log_date_time: std::option::Option<std::string::String>, /// <p>The event represents the type of a log.</p> pub event: std::option::Option<crate::model::JobLogEvent>, /// <p>Metadata associated with a Job log.</p> pub event_data: std::option::Option<crate::model::JobLogEventData>, } impl JobLog { /// <p>The date and time the log was taken.</p> pub fn log_date_time(&self) -> std::option::Option<&str> { self.log_date_time.as_deref() } /// <p>The event represents the type of a log.</p> pub fn event(&self) -> std::option::Option<&crate::model::JobLogEvent> { self.event.as_ref() } /// <p>Metadata associated with a Job log.</p> pub fn event_data(&self) -> std::option::Option<&crate::model::JobLogEventData> { self.event_data.as_ref() } } impl std::fmt::Debug for JobLog { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("JobLog"); formatter.field("log_date_time", &self.log_date_time); formatter.field("event", &self.event); formatter.field("event_data", &self.event_data); formatter.finish() } } /// See [`JobLog`](crate::model::JobLog) pub mod job_log { /// A builder for [`JobLog`](crate::model::JobLog) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) log_date_time: std::option::Option<std::string::String>, pub(crate) event: std::option::Option<crate::model::JobLogEvent>, pub(crate) event_data: std::option::Option<crate::model::JobLogEventData>, } impl Builder { /// <p>The date and time the log was taken.</p> pub fn log_date_time(mut self, input: impl Into<std::string::String>) -> Self { self.log_date_time = Some(input.into()); self } /// <p>The date and time the log was taken.</p> pub fn set_log_date_time( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.log_date_time = input; self } /// <p>The event represents the type of a log.</p> pub fn event(mut self, input: crate::model::JobLogEvent) -> Self { self.event = Some(input); self } /// <p>The event represents 
the type of a log.</p> pub fn set_event(mut self, input: std::option::Option<crate::model::JobLogEvent>) -> Self { self.event = input; self } /// <p>Metadata associated with a Job log.</p> pub fn event_data(mut self, input: crate::model::JobLogEventData) -> Self { self.event_data = Some(input); self } /// <p>Metadata associated with a Job log.</p> pub fn set_event_data( mut self, input: std::option::Option<crate::model::JobLogEventData>, ) -> Self { self.event_data = input; self } /// Consumes the builder and constructs a [`JobLog`](crate::model::JobLog) pub fn build(self) -> crate::model::JobLog { crate::model::JobLog { log_date_time: self.log_date_time, event: self.event, event_data: self.event_data, } } } } impl JobLog { /// Creates a new builder-style object to manufacture [`JobLog`](crate::model::JobLog) pub fn builder() -> crate::model::job_log::Builder { crate::model::job_log::Builder::default() } } /// <p>Metadata associated with a Job log.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct JobLogEventData { /// <p>The ID of a Source Server.</p> pub source_server_id: std::option::Option<std::string::String>, /// <p>The ID of a conversion server.</p> pub conversion_server_id: std::option::Option<std::string::String>, /// <p>The ID of a Recovery Instance.</p> pub target_instance_id: std::option::Option<std::string::String>, /// <p>A string representing a job error.</p> pub raw_error: std::option::Option<std::string::String>, } impl JobLogEventData { /// <p>The ID of a Source Server.</p> pub fn source_server_id(&self) -> std::option::Option<&str> { self.source_server_id.as_deref() } /// <p>The ID of a conversion server.</p> pub fn conversion_server_id(&self) -> std::option::Option<&str> { self.conversion_server_id.as_deref() } /// <p>The ID of a Recovery Instance.</p> pub fn target_instance_id(&self) -> std::option::Option<&str> { self.target_instance_id.as_deref() } /// <p>A string representing a job error.</p> pub fn raw_error(&self) -> std::option::Option<&str> { self.raw_error.as_deref() } } impl std::fmt::Debug for JobLogEventData { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("JobLogEventData"); formatter.field("source_server_id", &self.source_server_id); formatter.field("conversion_server_id", &self.conversion_server_id); formatter.field("target_instance_id", &self.target_instance_id); formatter.field("raw_error", &self.raw_error); formatter.finish() } } /// See [`JobLogEventData`](crate::model::JobLogEventData) pub mod job_log_event_data { /// A builder for [`JobLogEventData`](crate::model::JobLogEventData) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) source_server_id: std::option::Option<std::string::String>, pub(crate) conversion_server_id: std::option::Option<std::string::String>, pub(crate) target_instance_id: std::option::Option<std::string::String>, pub(crate) raw_error: std::option::Option<std::string::String>, } impl Builder { /// <p>The ID of a Source Server.</p> pub fn source_server_id(mut self, input: impl Into<std::string::String>) -> Self { self.source_server_id = Some(input.into()); self } /// <p>The ID of a Source Server.</p> pub fn set_source_server_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.source_server_id = input; self } /// <p>The ID of a conversion server.</p> pub fn conversion_server_id(mut self, input: impl Into<std::string::String>) 
-> Self { self.conversion_server_id = Some(input.into()); self } /// <p>The ID of a conversion server.</p> pub fn set_conversion_server_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.conversion_server_id = input; self } /// <p>The ID of a Recovery Instance.</p> pub fn target_instance_id(mut self, input: impl Into<std::string::String>) -> Self { self.target_instance_id = Some(input.into()); self } /// <p>The ID of a Recovery Instance.</p> pub fn set_target_instance_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.target_instance_id = input; self } /// <p>A string representing a job error.</p> pub fn raw_error(mut self, input: impl Into<std::string::String>) -> Self { self.raw_error = Some(input.into()); self } /// <p>A string representing a job error.</p> pub fn set_raw_error(mut self, input: std::option::Option<std::string::String>) -> Self { self.raw_error = input; self } /// Consumes the builder and constructs a [`JobLogEventData`](crate::model::JobLogEventData) pub fn build(self) -> crate::model::JobLogEventData { crate::model::JobLogEventData { source_server_id: self.source_server_id, conversion_server_id: self.conversion_server_id, target_instance_id: self.target_instance_id, raw_error: self.raw_error, } } } } impl JobLogEventData { /// Creates a new builder-style object to manufacture [`JobLogEventData`](crate::model::JobLogEventData) pub fn builder() -> crate::model::job_log_event_data::Builder { crate::model::job_log_event_data::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum JobLogEvent { #[allow(missing_docs)] // documentation missing in model CleanupEnd, #[allow(missing_docs)] // documentation missing in model CleanupFail, #[allow(missing_docs)] // documentation missing in model CleanupStart, #[allow(missing_docs)] // documentation missing in model ConversionEnd, #[allow(missing_docs)] // documentation missing in model ConversionFail, #[allow(missing_docs)] // documentation missing in model ConversionStart, #[allow(missing_docs)] // documentation missing in model JobCancel, #[allow(missing_docs)] // documentation missing in model JobEnd, #[allow(missing_docs)] // documentation missing in model JobStart, #[allow(missing_docs)] // documentation missing in model LaunchFailed, #[allow(missing_docs)] // documentation missing in model LaunchStart, #[allow(missing_docs)] // documentation missing in model ServerSkipped, #[allow(missing_docs)] // documentation missing in model SnapshotEnd, #[allow(missing_docs)] // documentation missing in model SnapshotFail, #[allow(missing_docs)] // documentation missing in model SnapshotStart, #[allow(missing_docs)] // documentation missing in model UsingPreviousSnapshot, #[allow(missing_docs)] // documentation missing in model UsingPreviousSnapshotFailed, /// Unknown contains new variants that have been added since this code was generated. 
Unknown(String), } impl std::convert::From<&str> for JobLogEvent { fn from(s: &str) -> Self { match s { "CLEANUP_END" => JobLogEvent::CleanupEnd, "CLEANUP_FAIL" => JobLogEvent::CleanupFail, "CLEANUP_START" => JobLogEvent::CleanupStart, "CONVERSION_END" => JobLogEvent::ConversionEnd, "CONVERSION_FAIL" => JobLogEvent::ConversionFail, "CONVERSION_START" => JobLogEvent::ConversionStart, "JOB_CANCEL" => JobLogEvent::JobCancel, "JOB_END" => JobLogEvent::JobEnd, "JOB_START" => JobLogEvent::JobStart, "LAUNCH_FAILED" => JobLogEvent::LaunchFailed, "LAUNCH_START" => JobLogEvent::LaunchStart, "SERVER_SKIPPED" => JobLogEvent::ServerSkipped, "SNAPSHOT_END" => JobLogEvent::SnapshotEnd, "SNAPSHOT_FAIL" => JobLogEvent::SnapshotFail, "SNAPSHOT_START" => JobLogEvent::SnapshotStart, "USING_PREVIOUS_SNAPSHOT" => JobLogEvent::UsingPreviousSnapshot, "USING_PREVIOUS_SNAPSHOT_FAILED" => JobLogEvent::UsingPreviousSnapshotFailed, other => JobLogEvent::Unknown(other.to_owned()), } } } impl std::str::FromStr for JobLogEvent { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(JobLogEvent::from(s)) } } impl JobLogEvent { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { JobLogEvent::CleanupEnd => "CLEANUP_END", JobLogEvent::CleanupFail => "CLEANUP_FAIL", JobLogEvent::CleanupStart => "CLEANUP_START", JobLogEvent::ConversionEnd => "CONVERSION_END", JobLogEvent::ConversionFail => "CONVERSION_FAIL", JobLogEvent::ConversionStart => "CONVERSION_START", JobLogEvent::JobCancel => "JOB_CANCEL", JobLogEvent::JobEnd => "JOB_END", JobLogEvent::JobStart => "JOB_START", JobLogEvent::LaunchFailed => "LAUNCH_FAILED", JobLogEvent::LaunchStart => "LAUNCH_START", JobLogEvent::ServerSkipped => "SERVER_SKIPPED", JobLogEvent::SnapshotEnd => "SNAPSHOT_END", JobLogEvent::SnapshotFail => "SNAPSHOT_FAIL", JobLogEvent::SnapshotStart => "SNAPSHOT_START", JobLogEvent::UsingPreviousSnapshot => "USING_PREVIOUS_SNAPSHOT", JobLogEvent::UsingPreviousSnapshotFailed => "USING_PREVIOUS_SNAPSHOT_FAILED", JobLogEvent::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &[ "CLEANUP_END", "CLEANUP_FAIL", "CLEANUP_START", "CONVERSION_END", "CONVERSION_FAIL", "CONVERSION_START", "JOB_CANCEL", "JOB_END", "JOB_START", "LAUNCH_FAILED", "LAUNCH_START", "SERVER_SKIPPED", "SNAPSHOT_END", "SNAPSHOT_FAIL", "SNAPSHOT_START", "USING_PREVIOUS_SNAPSHOT", "USING_PREVIOUS_SNAPSHOT_FAILED", ] } } impl AsRef<str> for JobLogEvent { fn as_ref(&self) -> &str { self.as_str() } } /// <p>A set of filters by which to return Jobs.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeJobsRequestFilters { /// <p>An array of Job IDs that should be returned. An empty array means all jobs.</p> pub job_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>The start date in a date range query.</p> pub from_date: std::option::Option<std::string::String>, /// <p>The end date in a date range query.</p> pub to_date: std::option::Option<std::string::String>, } impl DescribeJobsRequestFilters { /// <p>An array of Job IDs that should be returned. 
An empty array means all jobs.</p> pub fn job_i_ds(&self) -> std::option::Option<&[std::string::String]> { self.job_i_ds.as_deref() } /// <p>The start date in a date range query.</p> pub fn from_date(&self) -> std::option::Option<&str> { self.from_date.as_deref() } /// <p>The end date in a date range query.</p> pub fn to_date(&self) -> std::option::Option<&str> { self.to_date.as_deref() } } impl std::fmt::Debug for DescribeJobsRequestFilters { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeJobsRequestFilters"); formatter.field("job_i_ds", &self.job_i_ds); formatter.field("from_date", &self.from_date); formatter.field("to_date", &self.to_date); formatter.finish() } } /// See [`DescribeJobsRequestFilters`](crate::model::DescribeJobsRequestFilters) pub mod describe_jobs_request_filters { /// A builder for [`DescribeJobsRequestFilters`](crate::model::DescribeJobsRequestFilters) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) job_i_ds: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) from_date: std::option::Option<std::string::String>, pub(crate) to_date: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `job_i_ds`. /// /// To override the contents of this collection use [`set_job_i_ds`](Self::set_job_i_ds). /// /// <p>An array of Job IDs that should be returned. An empty array means all jobs.</p> pub fn job_i_ds(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.job_i_ds.unwrap_or_default(); v.push(input.into()); self.job_i_ds = Some(v); self } /// <p>An array of Job IDs that should be returned. An empty array means all jobs.</p> pub fn set_job_i_ds( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.job_i_ds = input; self } /// <p>The start date in a date range query.</p> pub fn from_date(mut self, input: impl Into<std::string::String>) -> Self { self.from_date = Some(input.into()); self } /// <p>The start date in a date range query.</p> pub fn set_from_date(mut self, input: std::option::Option<std::string::String>) -> Self { self.from_date = input; self } /// <p>The end date in a date range query.</p> pub fn to_date(mut self, input: impl Into<std::string::String>) -> Self { self.to_date = Some(input.into()); self } /// <p>The end date in a date range query.</p> pub fn set_to_date(mut self, input: std::option::Option<std::string::String>) -> Self { self.to_date = input; self } /// Consumes the builder and constructs a [`DescribeJobsRequestFilters`](crate::model::DescribeJobsRequestFilters) pub fn build(self) -> crate::model::DescribeJobsRequestFilters { crate::model::DescribeJobsRequestFilters { job_i_ds: self.job_i_ds, from_date: self.from_date, to_date: self.to_date, } } } } impl DescribeJobsRequestFilters { /// Creates a new builder-style object to manufacture [`DescribeJobsRequestFilters`](crate::model::DescribeJobsRequestFilters) pub fn builder() -> crate::model::describe_jobs_request_filters::Builder { crate::model::describe_jobs_request_filters::Builder::default() } }
expected_timestamp
decorator.py
# ----------------------------------------------------------------------
# Decorators
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------

# NOC modules
from noc.core.perf import metrics
from .base import cache as x_cache


def cachedmethod(cache=None, key="cache-%s", lock=None, ttl=None, version=0):
    """
    Decorator to wrap a class instance method with a memoizing callable
    :param cache: Callable which receives the instance and returns an in-memory
        dict-like cache, or None. Pass None when no in-memory caching is required
    :param key: Key mask used to convert args to a cache-key string
    :param lock: Callable which receives the instance and returns a threading lock
    :param ttl: Record time-to-live
    :param version: External cache version
    :return:
    """

    def decorator(method):
        if lock:

            def wrapper(self, *args, **kwargs):
                perf_key = key.replace("-%s", "").replace("-", "_")
                perf_key_requests = metrics["cache_requests", ("cache_key", perf_key)]
                perf_key_l1_hits = metrics[
                    "cache_hits", ("cache_key", perf_key), ("cache_level", "internal")
                ]
                perf_key_l2_hits = metrics[
                    "cache_hits", ("cache_key", perf_key), ("cache_level", "external")
                ]
                perf_key_misses = metrics["cache_misses", ("cache_key", perf_key)]
                perf_key_lock_acquires = metrics["cache_locks_acquires", ("cache_key", perf_key)]
                perf_key_requests += 1
                k = key % args
                c = None
                with lock(self):
                    perf_key_lock_acquires += 1
                    if cache:
                        # Try in-memory cache
                        c = cache(self)
                        if c is not None:
                            # In-memory cache provided
                            try:
                                v = c[k]
                                perf_key_l1_hits += 1
                                return v
                            except KeyError:
                                pass
                # Try external cache
                v = x_cache.get(k, version=version)
                if v:
                    perf_key_l2_hits += 1
                    if cache and c is not None:
                        with lock(self):
                            perf_key_lock_acquires += 1
                            # Backfill in-memory cache
                            try:
                                c[k] = v
                            except ValueError:
                                pass  # Value too large
                    return v
                # Fallback to function
                perf_key_misses += 1
                v = method(self, *args, **kwargs)
                with lock(self):
                    perf_key_lock_acquires += 1
                    if cache and c is not None:
                        # Backfill in-memory cache
                        try:
                            c[k] = v
                        except ValueError:
                            pass
                # Backfill external cache
                x_cache.set(k, v, ttl=ttl, version=version)
                # Done
                return v

        else:

            def wrapper(self, *args, **kwargs):
                perf_key = key.replace("-%s", "").replace("-", "_")
                perf_key_requests = metrics["cache_requests", ("cache_key", perf_key)]
                perf_key_l1_hits = metrics[
                    "cache_hits", ("cache_key", perf_key), ("cache_level", "internal")
                ]
                perf_key_l2_hits = metrics[
                    "cache_hits", ("cache_key", perf_key), ("cache_level", "external")
                ]
                perf_key_misses = metrics["cache_misses", ("cache_key", perf_key)]
                perf_key_requests += 1
                k = key % args
                c = None
                if cache:
                    # Try in-memory cache
                    c = cache(self)
                    if c is not None:
                        # In-memory cache provided
                        try:
                            v = c[k]
                            perf_key_l1_hits += 1
                            return v
                        except KeyError:
                            pass
                # Try external cache
                v = x_cache.get(k, version=version)
                if v:
                # Fallback to function
                perf_key_misses += 1
                v = method(self, *args, **kwargs)
                if cache and c is not None:
                    # Backfill in-memory cache
                    try:
                        c[k] = v
                    except ValueError:
                        pass
                # Backfill external cache
                x_cache.set(k, v, ttl=ttl, version=version)
                # Done
                return v

        return wrapper

    return decorator
                    perf_key_l2_hits += 1
                    if cache and c is not None:
                        # Backfill in-memory cache
                        try:
                            c[k] = v
                        except ValueError:
                            pass  # Value too large
                    return v
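For orientation, a minimal usage sketch of `cachedmethod` (hedged: the import path, the example class, and `expensive_lookup` are hypothetical, and a working NOC runtime is assumed so that `x_cache` has a real backend). The `cache` argument is a callable that receives the instance and returns a dict-like object (or None to skip in-memory caching), and `lock` is a callable returning a lock:

```python
import threading

from noc.core.cache.decorator import cachedmethod  # hypothetical import path


class CapabilityResolver(object):  # hypothetical example class
    def __init__(self):
        self._cache = {}               # per-instance dict-protocol cache
        self._lock = threading.Lock()  # guards the in-memory cache

    @cachedmethod(
        cache=lambda self: self._cache,  # callable returning the dict-like cache
        key="caps-%s",                   # mask applied as `key % args`
        lock=lambda self: self._lock,    # callable returning a lock
        ttl=300,                         # external cache TTL, seconds
        version=1,                       # bump to invalidate the external cache
    )
    def get_caps(self, object_id):
        return expensive_lookup(object_id)  # hypothetical slow call
```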
shop_recall.py
# _*_ coding: utf-8 _*_ from math import radians, cos, sin, asin, sqrt from app.libs.poi_search.es_search_category import search from app.libs.poi_search.es_search_street import search as search_street from app.models.new_shop import NewShop as Shop from app.models.group import Group from app.models.new_user import NewUser as User from app.model_views.shop import ShopCollection from sqlalchemy import and_ class Recall: @staticmethod def
(lon1, lat1, lon2, lat2):  # lon1, lat1, lon2, lat2 (decimal degrees)
        """
        Calculate the great circle distance between two points
        on the earth (specified in decimal degrees)
        """
        # Convert decimal degrees to radians
        lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])

        # Haversine formula
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
        c = 2 * asin(sqrt(a))
        r = 6371  # Mean Earth radius in kilometres
        return c * r * 1000

    @staticmethod
    def list_page(input_list, page, page_size):
        if page_size * page >= len(input_list):
            return input_list[(page - 1) * page_size:]
        else:
            return input_list[(page - 1) * page_size:page * page_size]

    def sort_by_distance(self, page, page_size, search_words, location, user_id, category=""):
        from time import time
        t1 = time()
        current_lat = float(location["lat"])
        current_lon = float(location["lon"])
        search_res = search(location, keyword=search_words, category=category)
        # print(search_res)
        # search_res = search(location, keyword=[search_words])
        total = len(search_res)
        search_res = self.list_page(search_res, page, page_size)
        if not search_res:
            res = {
                "total": 0,
                "current_page": page,
                "items": [],
            }
            return res
        t2 = time()
        print(t2 - t1)
        filter_list = []
        for item in search_res:
            filter_list.append(item['_source']['id'])
        # shop_data_list = Shop.query.filter(Shop.poi_id.in_(filter_list)).paginate(page=page, per_page=page_size, error_out=False)
        shop_data_list = Shop.query.filter(Shop.poi_id.in_(filter_list)).all()
        group_data_list = Group.query.filter(and_(Group.user_openid == user_id, Group.status == 2)).all()
        group_data_dict = {}
        for group_data in group_data_list:
            group_data_dict[group_data.poi_id] = 1
        # print(shop_data_list.items)
        t3 = time()
        print(t3 - t2)
        distance_list = []
        street_info_list = []
        for shop_data in shop_data_list:
            shop_lat = float(shop_data.latitude) / 1e6
            shop_lon = float(shop_data.longitude) / 1e6
            search_res_street = search_street({"lat": shop_lat, "lon": shop_lon}, keyword=[""])
            if len(search_res_street) <= 0:
                street_name = shop_data.district
            else:
                street_name = search_res_street[0]['_source']['name']
            street_info_list.append(street_name)
            distance = self.haversine(current_lon, current_lat, shop_lon, shop_lat)
            print(distance)
            distance_list.append(distance)
        t4 = time()
        print(t4 - t3)
        shop_collection = ShopCollection(is_debug=True)
        shop_collection.fill(shop_data_list, distance_list, group_data_dict, street_info_list, user_id)
        # print(shop_collection.items)
        # print(shop_collection.items[0].name)
        res = {
            "total": total,
            "current_page": page,
            "items": shop_collection.items,
        }
        return res
haversine
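As a sanity check on the distance math above, here is a standalone restatement of the same haversine formula (the coordinates are illustrative and the printed value approximate):

```python
from math import radians, cos, sin, asin, sqrt

def haversine(lon1, lat1, lon2, lat2):
    """Great-circle distance in metres between two points in decimal degrees."""
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * asin(sqrt(a)) * 6_371_000  # mean Earth radius, metres

# Beijing -> Shanghai is roughly 1,070 km:
print(round(haversine(116.407, 39.904, 121.474, 31.230) / 1000))  # ~1067
```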
touchableIcon.tsx
import React from 'react' import { TouchableRipple } from 'react-native-paper' export default function
(props: {
  onPress?: () => void
  size?: number
  color?: string
  rippleColor?: string
  children?: React.ReactNode
}) {
  const size = props.size || 48
  return (
    <TouchableRipple
      onPress={props.onPress}
      style={{
        display: 'flex',
        alignItems: 'center',
        justifyContent: 'center',
        backgroundColor: props.color || 'transparent',
        width: size,
        height: size,
        borderRadius: size / 2, // half the computed size, even when props.size is unset
      }}
      rippleColor={props.rippleColor || 'white'}
      borderless
    >
      {props.children}
    </TouchableRipple>
  )
}
TouchableIcon
test_node.py
#!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import decimal import errno import http.client import json import logging import os import re import subprocess import time from .authproxy import JSONRPCException from .util import ( assert_equal, delete_cookie_file, get_rpc_proxy, rpc_url, wait_until, p2p_port, ) # For Python 3.4 compatibility JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) BITCOIND_PROC_WAIT_TIMEOUT = 60 class TestNode(): """A class for representing a bitcoind node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False): self.index = i self.datadir = os.path.join(dirname, "node" + str(i)) self.rpchost = rpchost if timewait:
        if binary is None:
            self.binary = os.getenv("LITECOIND", "xcelrd")
        else:
            self.binary = binary
        self.stderr = stderr
        self.coverage_dir = coverage_dir
        # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibility, they can just set the args property directly.
        self.extra_args = extra_args
        self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]

        self.cli = TestNodeCLI(os.getenv("LITECOINCLI", "xcelr-cli"), self.datadir)
        self.use_cli = use_cli

        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True  # Whether to kill the node when this object goes away

        self.p2ps = []

    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print("Cleaning up leftover process")
            self.process.kill()

    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(self.cli, name)
        else:
            assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
            return getattr(self.rpc, name)

    def start(self, extra_args=None, stderr=None, *args, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args
        if stderr is None:
            stderr = self.stderr
        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir)
        self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
        self.running = True
        self.log.debug("xcelrd started, waiting for RPC to come up")

    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            assert self.process.poll() is None, "xcelrd exited with status %i during initialization" % self.process.returncode
            try:
                self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                self.rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.rpc_connected = True
                self.url = self.rpc.url
                self.log.debug("RPC successfully started")
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED:  # Port not yet open?
                    raise  # unknown IO error
            except JSONRPCException as e:  # Initialization phase
                if e.error['code'] != -28:  # RPC in warmup?
                    raise  # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword.
bitcoind still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        raise AssertionError("Unable to connect to xcelrd")

    def get_wallet_rpc(self, wallet_name):
        if self.use_cli:
            return self.cli("-rpcwallet={}".format(wallet_name))
        else:
            assert self.rpc_connected
            assert self.rpc
            wallet_path = "wallet/%s" % wallet_name
            return self.rpc / wallet_path

    def stop_node(self):
        """Stop the node."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            self.stop()
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")
        del self.p2ps[:]

    def is_node_stopped(self):
        """Checks whether the node has stopped.

        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False

        # process has stopped. Assert that it didn't return an error code.
        assert_equal(return_code, 0)
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True

    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        wait_until(self.is_node_stopped, timeout=timeout)

    def node_encrypt_wallet(self, passphrase):
        """Encrypts the wallet.

        This causes bitcoind to shutdown, so this method takes care of cleaning up resources."""
        self.encryptwallet(passphrase)
        self.wait_until_stopped()

    def add_p2p_connection(self, p2p_conn, *args, **kwargs):
        """Add a p2p connection to the node.

        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'
        p2p_conn.peer_connect(*args, **kwargs)
        self.p2ps.append(p2p_conn)

        return p2p_conn

    @property
    def p2p(self):
        """Return the first p2p connection

        Convenience property - most tests only use a single p2p connection to each
        node, so this saves having to write node.p2ps[0] many times."""
        assert self.p2ps, "No p2p connection"
        return self.p2ps[0]

    def disconnect_p2ps(self):
        """Close all p2p connections to the node."""
        for p in self.p2ps:
            p.peer_disconnect()
        del self.p2ps[:]


class TestNodeCLIAttr:
    def __init__(self, cli, command):
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        return lambda: self(*args, **kwargs)


class TestNodeCLI():
    """Interface to bitcoin-cli for an individual node"""

    def __init__(self, binary, datadir):
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')

    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with bitcoin-cli command-line options
        cli = TestNodeCLI(self.binary, self.datadir)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli

    def __getattr__(self, command):
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results

    def send_cli(self, command=None, *args, **kwargs):
        """Run bitcoin-cli command.
Deserializes returned string as python object.""" pos_args = [str(arg) for arg in args] named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()] assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug("Running xcelr-cli command: %s" % command) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except JSONDecodeError: return cli_stdout.rstrip("\n")
self.rpc_timeout = timewait else: # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60
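A brief sketch of how the `TestNode` / `TestNodeCLI` machinery above is typically driven from a test (hedged: it assumes a test-framework subclass where `self.nodes` has been populated, and `getbestblockhash` / `getwalletinfo` are standard bitcoind RPCs dispatched through `__getattr__`; the wallet name is hypothetical):

```python
# Inside a test case, after the framework has started node 0:
node = self.nodes[0]

# Plain CLI-backed RPC call; the JSON reply is parsed with Decimal floats.
height = node.cli.getblockcount()

# __call__ returns a fresh TestNodeCLI carrying extra bitcoin-cli options;
# keyword arguments would be passed with -named, and positional and named
# arguments cannot be mixed in one call.
wallet_cli = node.cli("-rpcwallet=w1")  # "w1" is a hypothetical wallet name
info = wallet_cli.getwalletinfo()

# batch() runs deferred requests built by get_request() and returns a
# list of {"result": ...} / {"error": ...} dicts.
results = node.cli.batch([
    node.cli.getblockcount.get_request(),
    node.cli.getbestblockhash.get_request(),
])
```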
conf.py
# -*- coding: utf-8 -*-

import os
import sys
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
]

# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
# autodoc_mock_imports = ["digitalio"]

intersphinx_mapping = {
    'python': ('https://docs.python.org/3.4', None),
    'BusDevice': ('https://circuitpython.readthedocs.io/projects/busdevice/en/latest/', None),
    'Register': ('https://circuitpython.readthedocs.io/projects/register/en/latest/', None),
    'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None),
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Adafruit MatrixKeypad Library'
copyright = u'2018 ladyada'
author = u'ladyada'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = "any"

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True

napoleon_numpy_docstring = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:
    # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
        html_theme = 'default'
        html_theme_path = ['.']
else:
    html_theme_path = ['.']

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = '_static/favicon.ico' # Output file base name for HTML help builder. htmlhelp_basename = 'AdafruitMatrixkeypadLibrarydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'AdafruitMatrixKeypadLibrary.tex', u'AdafruitMatrixKeypad Library Documentation', author, 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'AdafruitMatrixKeypadlibrary', u'Adafruit MatrixKeypad Library Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'AdafruitMatrixKeypadLibrary', u'Adafruit MatrixKeypad Library Documentation', author, 'AdafruitMatrixKeypadLibrary', 'One line description of project.', 'Miscellaneous'), ]
#
serzh_video.js
import React, { Component } from 'react'; import { Link } from 'react-router-dom'; class
extends Component {
  render() {
    return (
      <div className="box">
        <header>
          <div className="header-box">
            <h1 className="">
              WATCH HOW PROTESTERS SPEND A NIGHT AT THE FRENCH SQUARE<span>!</span>
            </h1>
          </div>
        </header>
        <main>
          <section>
            <div className="video-YouTube">
              <iframe src="https://www.youtube.com/embed/UJ-0ZAl3XhI" frameBorder="0" allow="autoplay; encrypted-media" allowFullScreen title="Velvet Revolution Cutscene"></iframe>
            </div>
            <div className="sub-section" style={{marginTop: "10px"}}>
              <Link to="/citizen/dialer_serzh">
                <div className="buttonLink f-f--ourF">
                  <span className="f-f--ourF">
                    I Watched It!
                  </span>
                </div>
              </Link>
            </div>
          </section>
        </main>
      </div>
    );
  }
}

export default App;
App
models.go
package cdn // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "encoding/json" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/to" "github.com/Azure/go-autorest/tracing" "net/http" ) // The package's fully qualified name. const fqdn = "github.com/Azure/azure-sdk-for-go/services/cdn/mgmt/2019-04-15/cdn" // CacheExpirationActionParameters defines the parameters for the cache expiration action. type CacheExpirationActionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // CacheBehavior - Caching behavior for the requests. Possible values include: 'BypassCache', 'Override', 'SetIfMissing' CacheBehavior CacheBehavior `json:"cacheBehavior,omitempty"` // CacheType - The level at which the content needs to be cached. CacheType *string `json:"cacheType,omitempty"` // CacheDuration - The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss CacheDuration *string `json:"cacheDuration,omitempty"` } // CacheKeyQueryStringActionParameters defines the parameters for the cache-key query string action. type CacheKeyQueryStringActionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // QueryStringBehavior - Caching behavior for the requests. Possible values include: 'Include', 'IncludeAll', 'Exclude', 'ExcludeAll' QueryStringBehavior QueryStringBehavior `json:"queryStringBehavior,omitempty"` // QueryParameters - query parameters to include or exclude (comma separated). QueryParameters *string `json:"queryParameters,omitempty"` } // CertificateSourceParameters defines the parameters for using CDN managed certificate for securing custom // domain. type CertificateSourceParameters struct { OdataType *string `json:"@odata.type,omitempty"` // CertificateType - Type of certificate used. Possible values include: 'Shared', 'Dedicated' CertificateType CertificateType `json:"certificateType,omitempty"` } // CheckNameAvailabilityInput input of CheckNameAvailability API. type CheckNameAvailabilityInput struct { // Name - The resource name to validate. Name *string `json:"name,omitempty"` // Type - The type of the resource whose name is to be validated. Type *string `json:"type,omitempty"` } // CheckNameAvailabilityOutput output of check name availability API. type CheckNameAvailabilityOutput struct { autorest.Response `json:"-"` // NameAvailable - READ-ONLY; Indicates whether the name is available. NameAvailable *bool `json:"nameAvailable,omitempty"` // Reason - READ-ONLY; The reason why the name is not available. Reason *string `json:"reason,omitempty"` // Message - READ-ONLY; The detailed error message describing why the name is not available. 
Message *string `json:"message,omitempty"` } // CidrIPAddress CIDR Ip address type CidrIPAddress struct { // BaseIPAddress - Ip address itself. BaseIPAddress *string `json:"baseIpAddress,omitempty"` // PrefixLength - The length of the prefix of the ip address. PrefixLength *int32 `json:"prefixLength,omitempty"` } // CookiesMatchConditionParameters defines the parameters for Cookies match conditions type CookiesMatchConditionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // Selector - Name of Cookies to be matched Selector *string `json:"selector,omitempty"` // Operator - Describes operator to be matched. Possible values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith', 'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual' Operator CookiesOperator `json:"operator,omitempty"` // NegateCondition - Describes if this is negate condition or not NegateCondition *bool `json:"negateCondition,omitempty"` // MatchValues - The match value for the condition of the delivery rule MatchValues *[]string `json:"matchValues,omitempty"` // Transforms - List of transforms Transforms *[]Transform `json:"transforms,omitempty"` } // CustomDomain friendly domain name mapping to the endpoint hostname that the customer provides for branding // purposes, e.g. www.contoso.com. type CustomDomain struct { autorest.Response `json:"-"` *CustomDomainProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource ID. ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name. Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type. Type *string `json:"type,omitempty"` } // MarshalJSON is the custom marshaler for CustomDomain. func (cd CustomDomain) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if cd.CustomDomainProperties != nil { objectMap["properties"] = cd.CustomDomainProperties } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for CustomDomain struct. func (cd *CustomDomain) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "properties": if v != nil { var customDomainProperties CustomDomainProperties err = json.Unmarshal(*v, &customDomainProperties) if err != nil { return err } cd.CustomDomainProperties = &customDomainProperties } case "id": if v != nil { var ID string err = json.Unmarshal(*v, &ID) if err != nil { return err } cd.ID = &ID } case "name": if v != nil { var name string err = json.Unmarshal(*v, &name) if err != nil { return err } cd.Name = &name } case "type": if v != nil { var typeVar string err = json.Unmarshal(*v, &typeVar) if err != nil { return err } cd.Type = &typeVar } } } return nil } // BasicCustomDomainHTTPSParameters the JSON object that contains the properties to secure a custom domain. type BasicCustomDomainHTTPSParameters interface { AsManagedHTTPSParameters() (*ManagedHTTPSParameters, bool) AsUserManagedHTTPSParameters() (*UserManagedHTTPSParameters, bool) AsCustomDomainHTTPSParameters() (*CustomDomainHTTPSParameters, bool) } // CustomDomainHTTPSParameters the JSON object that contains the properties to secure a custom domain. type CustomDomainHTTPSParameters struct { // ProtocolType - Defines the TLS extension protocol that is used for secure delivery. Possible values include: 'ServerNameIndication', 'IPBased' ProtocolType ProtocolType `json:"protocolType,omitempty"` // MinimumTLSVersion - TLS protocol version that will be used for Https. 
	// Possible values include: 'None', 'TLS10', 'TLS12'
	MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"`
	// CertificateSource - Possible values include: 'CertificateSourceCustomDomainHTTPSParameters', 'CertificateSourceCdn', 'CertificateSourceAzureKeyVault'
	CertificateSource CertificateSource `json:"certificateSource,omitempty"`
}

func unmarshalBasicCustomDomainHTTPSParameters(body []byte) (BasicCustomDomainHTTPSParameters, error) {
	var m map[string]interface{}
	err := json.Unmarshal(body, &m)
	if err != nil {
		return nil, err
	}

	switch m["certificateSource"] {
	case string(CertificateSourceCdn):
		var mhp ManagedHTTPSParameters
		err := json.Unmarshal(body, &mhp)
		return mhp, err
	case string(CertificateSourceAzureKeyVault):
		var umhp UserManagedHTTPSParameters
		err := json.Unmarshal(body, &umhp)
		return umhp, err
	default:
		var cdhp CustomDomainHTTPSParameters
		err := json.Unmarshal(body, &cdhp)
		return cdhp, err
	}
}

func unmarshalBasicCustomDomainHTTPSParametersArray(body []byte) ([]BasicCustomDomainHTTPSParameters, error) {
	var rawMessages []*json.RawMessage
	err := json.Unmarshal(body, &rawMessages)
	if err != nil {
		return nil, err
	}

	cdhpArray := make([]BasicCustomDomainHTTPSParameters, len(rawMessages))

	for index, rawMessage := range rawMessages {
		cdhp, err := unmarshalBasicCustomDomainHTTPSParameters(*rawMessage)
		if err != nil {
			return nil, err
		}
		cdhpArray[index] = cdhp
	}
	return cdhpArray, nil
}

// MarshalJSON is the custom marshaler for CustomDomainHTTPSParameters.
func (cdhp CustomDomainHTTPSParameters) MarshalJSON() ([]byte, error) {
	cdhp.CertificateSource = CertificateSourceCustomDomainHTTPSParameters
	objectMap := make(map[string]interface{})
	if cdhp.ProtocolType != "" {
		objectMap["protocolType"] = cdhp.ProtocolType
	}
	if cdhp.MinimumTLSVersion != "" {
		objectMap["minimumTlsVersion"] = cdhp.MinimumTLSVersion
	}
	if cdhp.CertificateSource != "" {
		objectMap["certificateSource"] = cdhp.CertificateSource
	}
	return json.Marshal(objectMap)
}

// AsManagedHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for CustomDomainHTTPSParameters.
func (cdhp CustomDomainHTTPSParameters) AsManagedHTTPSParameters() (*ManagedHTTPSParameters, bool) {
	return nil, false
}

// AsUserManagedHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for CustomDomainHTTPSParameters.
func (cdhp CustomDomainHTTPSParameters) AsUserManagedHTTPSParameters() (*UserManagedHTTPSParameters, bool) {
	return nil, false
}

// AsCustomDomainHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for CustomDomainHTTPSParameters.
func (cdhp CustomDomainHTTPSParameters) AsCustomDomainHTTPSParameters() (*CustomDomainHTTPSParameters, bool) {
	return &cdhp, true
}

// AsBasicCustomDomainHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for CustomDomainHTTPSParameters.
func (cdhp CustomDomainHTTPSParameters) AsBasicCustomDomainHTTPSParameters() (BasicCustomDomainHTTPSParameters, bool) {
	return &cdhp, true
}

// CustomDomainListResult result of the request to list custom domains. It contains a list of custom domain
// objects and a URL link to get the next set of results.
type CustomDomainListResult struct {
	autorest.Response `json:"-"`
	// Value - READ-ONLY; List of CDN CustomDomains within an endpoint.
	Value *[]CustomDomain `json:"value,omitempty"`
	// NextLink - URL to get the next set of custom domain objects if there are any.
	NextLink *string `json:"nextLink,omitempty"`
}
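// exampleDecodeCustomDomainHTTPSParameters is an illustrative, non-generated
// sketch (not part of the service surface): it shows how the
// "certificateSource" discriminator in unmarshalBasicCustomDomainHTTPSParameters
// routes a hypothetical payload to the concrete ManagedHTTPSParameters type.
func exampleDecodeCustomDomainHTTPSParameters() (BasicCustomDomainHTTPSParameters, error) {
	// "Cdn" selects ManagedHTTPSParameters, "AzureKeyVault" selects
	// UserManagedHTTPSParameters, and anything else falls back to the base type.
	payload := []byte(`{"certificateSource":"Cdn","protocolType":"ServerNameIndication"}`)
	return unmarshalBasicCustomDomainHTTPSParameters(payload)
}

// MarshalJSON is the custom marshaler for CustomDomainListResult.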
func (cdlr CustomDomainListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if cdlr.NextLink != nil { objectMap["nextLink"] = cdlr.NextLink } return json.Marshal(objectMap) } // CustomDomainListResultIterator provides access to a complete listing of CustomDomain values. type CustomDomainListResultIterator struct { i int page CustomDomainListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. func (iter *CustomDomainListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/CustomDomainListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { sc = iter.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } iter.i++ if iter.i < len(iter.page.Values()) { return nil } err = iter.page.NextWithContext(ctx) if err != nil { iter.i-- return err } iter.i = 0 return nil } // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (iter *CustomDomainListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. func (iter CustomDomainListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. func (iter CustomDomainListResultIterator) Response() CustomDomainListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. func (iter CustomDomainListResultIterator) Value() CustomDomain { if !iter.page.NotDone() { return CustomDomain{} } return iter.page.Values()[iter.i] } // Creates a new instance of the CustomDomainListResultIterator type. func NewCustomDomainListResultIterator(page CustomDomainListResultPage) CustomDomainListResultIterator { return CustomDomainListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. func (cdlr CustomDomainListResult) IsEmpty() bool { return cdlr.Value == nil || len(*cdlr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. func (cdlr CustomDomainListResult) hasNextLink() bool { return cdlr.NextLink != nil && len(*cdlr.NextLink) != 0 } // customDomainListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (cdlr CustomDomainListResult) customDomainListResultPreparer(ctx context.Context) (*http.Request, error) { if !cdlr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), autorest.WithBaseURL(to.String(cdlr.NextLink))) } // CustomDomainListResultPage contains a page of CustomDomain values. type CustomDomainListResultPage struct { fn func(context.Context, CustomDomainListResult) (CustomDomainListResult, error) cdlr CustomDomainListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. 
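// A typical consumer drains every page roughly like this (illustrative sketch,
// error handling abbreviated):
//
//	for page.NotDone() {
//		for _, cd := range page.Values() {
//			_ = cd // process each CustomDomain
//		}
//		if err := page.NextWithContext(ctx); err != nil {
//			break
//		}
//	}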
func (page *CustomDomainListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/CustomDomainListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { sc = page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } for { next, err := page.fn(ctx, page.cdlr) if err != nil { return err } page.cdlr = next if !next.hasNextLink() || !next.IsEmpty() { break } } return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (page *CustomDomainListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. func (page CustomDomainListResultPage) NotDone() bool { return !page.cdlr.IsEmpty() } // Response returns the raw server response from the last page request. func (page CustomDomainListResultPage) Response() CustomDomainListResult { return page.cdlr } // Values returns the slice of values for the current page or nil if there are no values. func (page CustomDomainListResultPage) Values() []CustomDomain { if page.cdlr.IsEmpty() { return nil } return *page.cdlr.Value } // Creates a new instance of the CustomDomainListResultPage type. func NewCustomDomainListResultPage(getNextPage func(context.Context, CustomDomainListResult) (CustomDomainListResult, error)) CustomDomainListResultPage { return CustomDomainListResultPage{fn: getNextPage} } // CustomDomainParameters the customDomain JSON object required for custom domain creation or update. type CustomDomainParameters struct { *CustomDomainPropertiesParameters `json:"properties,omitempty"` } // MarshalJSON is the custom marshaler for CustomDomainParameters. func (cdp CustomDomainParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if cdp.CustomDomainPropertiesParameters != nil { objectMap["properties"] = cdp.CustomDomainPropertiesParameters } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for CustomDomainParameters struct. func (cdp *CustomDomainParameters) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "properties": if v != nil { var customDomainPropertiesParameters CustomDomainPropertiesParameters err = json.Unmarshal(*v, &customDomainPropertiesParameters) if err != nil { return err } cdp.CustomDomainPropertiesParameters = &customDomainPropertiesParameters } } } return nil } // CustomDomainProperties the JSON object that contains the properties of the custom domain to create. type CustomDomainProperties struct { // HostName - The host name of the custom domain. Must be a domain name. HostName *string `json:"hostName,omitempty"` // ResourceState - READ-ONLY; Resource status of the custom domain. Possible values include: 'Creating', 'Active', 'Deleting' ResourceState CustomDomainResourceState `json:"resourceState,omitempty"` // CustomHTTPSProvisioningState - READ-ONLY; Provisioning status of Custom Https of the custom domain. 
	// Possible values include: 'Enabling', 'Enabled', 'Disabling', 'Disabled', 'Failed'
	CustomHTTPSProvisioningState CustomHTTPSProvisioningState `json:"customHttpsProvisioningState,omitempty"`
	// CustomHTTPSProvisioningSubstate - READ-ONLY; Provisioning substate shows the progress of custom HTTPS enabling/disabling process step by step. Possible values include: 'SubmittingDomainControlValidationRequest', 'PendingDomainControlValidationREquestApproval', 'DomainControlValidationRequestApproved', 'DomainControlValidationRequestRejected', 'DomainControlValidationRequestTimedOut', 'IssuingCertificate', 'DeployingCertificate', 'CertificateDeployed', 'DeletingCertificate', 'CertificateDeleted'
	CustomHTTPSProvisioningSubstate CustomHTTPSProvisioningSubstate `json:"customHttpsProvisioningSubstate,omitempty"`
	// CustomHTTPSParameters - Certificate parameters for securing custom HTTPS
	CustomHTTPSParameters BasicCustomDomainHTTPSParameters `json:"customHttpsParameters,omitempty"`
	// ValidationData - Special validation or data may be required when delivering CDN to some regions due to local compliance reasons. E.g. ICP license number of a custom domain is required to deliver content in China.
	ValidationData *string `json:"validationData,omitempty"`
	// ProvisioningState - READ-ONLY; Provisioning status of the custom domain.
	ProvisioningState *string `json:"provisioningState,omitempty"`
}

// MarshalJSON is the custom marshaler for CustomDomainProperties.
func (cdp CustomDomainProperties) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if cdp.HostName != nil {
		objectMap["hostName"] = cdp.HostName
	}
	objectMap["customHttpsParameters"] = cdp.CustomHTTPSParameters
	if cdp.ValidationData != nil {
		objectMap["validationData"] = cdp.ValidationData
	}
	return json.Marshal(objectMap)
}
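// exampleCustomDomainPropertiesJSON is an illustrative, non-generated sketch:
// the MarshalJSON above deliberately omits the READ-ONLY fields (resource and
// provisioning states), so only writable properties reach the wire. The host
// name is the hypothetical one used elsewhere in these docs.
func exampleCustomDomainPropertiesJSON() ([]byte, error) {
	host := "www.contoso.com"
	props := CustomDomainProperties{HostName: &host}
	// Yields roughly {"customHttpsParameters":null,"hostName":"www.contoso.com"}.
	return json.Marshal(props)
}

// UnmarshalJSON is the custom unmarshaler for CustomDomainProperties struct.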
func (cdp *CustomDomainProperties) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "hostName": if v != nil { var hostName string err = json.Unmarshal(*v, &hostName) if err != nil { return err } cdp.HostName = &hostName } case "resourceState": if v != nil { var resourceState CustomDomainResourceState err = json.Unmarshal(*v, &resourceState) if err != nil { return err } cdp.ResourceState = resourceState } case "customHttpsProvisioningState": if v != nil { var customHTTPSProvisioningState CustomHTTPSProvisioningState err = json.Unmarshal(*v, &customHTTPSProvisioningState) if err != nil { return err } cdp.CustomHTTPSProvisioningState = customHTTPSProvisioningState } case "customHttpsProvisioningSubstate": if v != nil { var customHTTPSProvisioningSubstate CustomHTTPSProvisioningSubstate err = json.Unmarshal(*v, &customHTTPSProvisioningSubstate) if err != nil { return err } cdp.CustomHTTPSProvisioningSubstate = customHTTPSProvisioningSubstate } case "customHttpsParameters": if v != nil { customHTTPSParameters, err := unmarshalBasicCustomDomainHTTPSParameters(*v) if err != nil { return err } cdp.CustomHTTPSParameters = customHTTPSParameters } case "validationData": if v != nil { var validationData string err = json.Unmarshal(*v, &validationData) if err != nil { return err } cdp.ValidationData = &validationData } case "provisioningState": if v != nil { var provisioningState string err = json.Unmarshal(*v, &provisioningState) if err != nil { return err } cdp.ProvisioningState = &provisioningState } } } return nil } // CustomDomainPropertiesParameters the JSON object that contains the properties of the custom domain to // create. type CustomDomainPropertiesParameters struct { // HostName - The host name of the custom domain. Must be a domain name. HostName *string `json:"hostName,omitempty"` } // CustomDomainsCreateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type CustomDomainsCreateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. func (future *CustomDomainsCreateFuture) Result(client CustomDomainsClient) (cd CustomDomain, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.CustomDomainsCreateFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.CustomDomainsCreateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if cd.Response.Response, err = future.GetResult(sender); err == nil && cd.Response.Response.StatusCode != http.StatusNoContent { cd, err = client.CreateResponder(cd.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.CustomDomainsCreateFuture", "Result", cd.Response.Response, "Failure responding to request") } } return } // CustomDomainsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type CustomDomainsDeleteFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. 
func (future *CustomDomainsDeleteFuture) Result(client CustomDomainsClient) (cd CustomDomain, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.CustomDomainsDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.CustomDomainsDeleteFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if cd.Response.Response, err = future.GetResult(sender); err == nil && cd.Response.Response.StatusCode != http.StatusNoContent { cd, err = client.DeleteResponder(cd.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.CustomDomainsDeleteFuture", "Result", cd.Response.Response, "Failure responding to request") } } return } // DeepCreatedOrigin the main origin of CDN content which is added when creating a CDN endpoint. type DeepCreatedOrigin struct { // Name - Origin name Name *string `json:"name,omitempty"` *DeepCreatedOriginProperties `json:"properties,omitempty"` } // MarshalJSON is the custom marshaler for DeepCreatedOrigin. func (dco DeepCreatedOrigin) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if dco.Name != nil { objectMap["name"] = dco.Name } if dco.DeepCreatedOriginProperties != nil { objectMap["properties"] = dco.DeepCreatedOriginProperties } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for DeepCreatedOrigin struct. func (dco *DeepCreatedOrigin) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "name": if v != nil { var name string err = json.Unmarshal(*v, &name) if err != nil { return err } dco.Name = &name } case "properties": if v != nil { var deepCreatedOriginProperties DeepCreatedOriginProperties err = json.Unmarshal(*v, &deepCreatedOriginProperties) if err != nil { return err } dco.DeepCreatedOriginProperties = &deepCreatedOriginProperties } } } return nil } // DeepCreatedOriginProperties properties of the origin created on the CDN endpoint. type DeepCreatedOriginProperties struct { // HostName - The address of the origin. It can be a domain name, IPv4 address, or IPv6 address. HostName *string `json:"hostName,omitempty"` // HTTPPort - The value of the HTTP port. Must be between 1 and 65535 HTTPPort *int32 `json:"httpPort,omitempty"` // HTTPSPort - The value of the HTTPS port. Must be between 1 and 65535 HTTPSPort *int32 `json:"httpsPort,omitempty"` } // DeliveryRule a rule that specifies a set of actions and conditions type DeliveryRule struct { // Name - Name of the rule Name *string `json:"name,omitempty"` // Order - The order in which the rules are applied for the endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will be applied before a rule with a greater order. Rule with order 0 is a special rule. It does not require any condition and actions listed in it will always be applied. Order *int32 `json:"order,omitempty"` // Conditions - A list of conditions that must be matched for the actions to be executed Conditions *[]BasicDeliveryRuleCondition `json:"conditions,omitempty"` // Actions - A list of actions that are executed when all the conditions of a rule are satisfied. 
Actions *[]BasicDeliveryRuleAction `json:"actions,omitempty"` } // UnmarshalJSON is the custom unmarshaler for DeliveryRule struct. func (dr *DeliveryRule) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "name": if v != nil { var name string err = json.Unmarshal(*v, &name) if err != nil { return err } dr.Name = &name } case "order": if v != nil { var order int32 err = json.Unmarshal(*v, &order) if err != nil { return err } dr.Order = &order } case "conditions": if v != nil { conditions, err := unmarshalBasicDeliveryRuleConditionArray(*v) if err != nil { return err } dr.Conditions = &conditions } case "actions": if v != nil { actions, err := unmarshalBasicDeliveryRuleActionArray(*v) if err != nil { return err } dr.Actions = &actions } } } return nil } // BasicDeliveryRuleAction an action for the delivery rule. type BasicDeliveryRuleAction interface { AsURLRedirectAction() (*URLRedirectAction, bool) AsURLRewriteAction() (*URLRewriteAction, bool) AsDeliveryRuleRequestHeaderAction() (*DeliveryRuleRequestHeaderAction, bool) AsDeliveryRuleResponseHeaderAction() (*DeliveryRuleResponseHeaderAction, bool) AsDeliveryRuleCacheExpirationAction() (*DeliveryRuleCacheExpirationAction, bool) AsDeliveryRuleCacheKeyQueryStringAction() (*DeliveryRuleCacheKeyQueryStringAction, bool) AsDeliveryRuleAction() (*DeliveryRuleAction, bool) } // DeliveryRuleAction an action for the delivery rule. type DeliveryRuleAction struct { // Name - Possible values include: 'NameDeliveryRuleAction', 'NameURLRedirect', 'NameURLRewrite', 'NameModifyRequestHeader', 'NameModifyResponseHeader', 'NameCacheExpiration', 'NameCacheKeyQueryString' Name NameBasicDeliveryRuleAction `json:"name,omitempty"` } func unmarshalBasicDeliveryRuleAction(body []byte) (BasicDeliveryRuleAction, error) { var m map[string]interface{} err := json.Unmarshal(body, &m) if err != nil { return nil, err } switch m["name"] { case string(NameURLRedirect): var ura URLRedirectAction err := json.Unmarshal(body, &ura) return ura, err case string(NameURLRewrite): var ura URLRewriteAction err := json.Unmarshal(body, &ura) return ura, err case string(NameModifyRequestHeader): var drrha DeliveryRuleRequestHeaderAction err := json.Unmarshal(body, &drrha) return drrha, err case string(NameModifyResponseHeader): var drrha DeliveryRuleResponseHeaderAction err := json.Unmarshal(body, &drrha) return drrha, err case string(NameCacheExpiration): var drcea DeliveryRuleCacheExpirationAction err := json.Unmarshal(body, &drcea) return drcea, err case string(NameCacheKeyQueryString): var drckqsa DeliveryRuleCacheKeyQueryStringAction err := json.Unmarshal(body, &drckqsa) return drckqsa, err default: var dra DeliveryRuleAction err := json.Unmarshal(body, &dra) return dra, err } } func unmarshalBasicDeliveryRuleActionArray(body []byte) ([]BasicDeliveryRuleAction, error) { var rawMessages []*json.RawMessage err := json.Unmarshal(body, &rawMessages) if err != nil { return nil, err } draArray := make([]BasicDeliveryRuleAction, len(rawMessages)) for index, rawMessage := range rawMessages { dra, err := unmarshalBasicDeliveryRuleAction(*rawMessage) if err != nil { return nil, err } draArray[index] = dra } return draArray, nil } // MarshalJSON is the custom marshaler for DeliveryRuleAction. 
func (dra DeliveryRuleAction) MarshalJSON() ([]byte, error) { dra.Name = NameDeliveryRuleAction objectMap := make(map[string]interface{}) if dra.Name != "" { objectMap["name"] = dra.Name } return json.Marshal(objectMap) } // AsURLRedirectAction is the BasicDeliveryRuleAction implementation for DeliveryRuleAction. func (dra DeliveryRuleAction) AsURLRedirectAction() (*URLRedirectAction, bool) { return nil, false } // AsURLRewriteAction is the BasicDeliveryRuleAction implementation for DeliveryRuleAction. func (dra DeliveryRuleAction) AsURLRewriteAction() (*URLRewriteAction, bool) { return nil, false } // AsDeliveryRuleRequestHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleAction. func (dra DeliveryRuleAction) AsDeliveryRuleRequestHeaderAction() (*DeliveryRuleRequestHeaderAction, bool) { return nil, false } // AsDeliveryRuleResponseHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleAction. func (dra DeliveryRuleAction) AsDeliveryRuleResponseHeaderAction() (*DeliveryRuleResponseHeaderAction, bool) { return nil, false } // AsDeliveryRuleCacheExpirationAction is the BasicDeliveryRuleAction implementation for DeliveryRuleAction. func (dra DeliveryRuleAction) AsDeliveryRuleCacheExpirationAction() (*DeliveryRuleCacheExpirationAction, bool) { return nil, false } // AsDeliveryRuleCacheKeyQueryStringAction is the BasicDeliveryRuleAction implementation for DeliveryRuleAction. func (dra DeliveryRuleAction) AsDeliveryRuleCacheKeyQueryStringAction() (*DeliveryRuleCacheKeyQueryStringAction, bool) { return nil, false } // AsDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleAction. func (dra DeliveryRuleAction) AsDeliveryRuleAction() (*DeliveryRuleAction, bool) { return &dra, true } // AsBasicDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleAction. func (dra DeliveryRuleAction) AsBasicDeliveryRuleAction() (BasicDeliveryRuleAction, bool) { return &dra, true } // DeliveryRuleCacheExpirationAction defines the cache expiration action for the delivery rule. type DeliveryRuleCacheExpirationAction struct { // Parameters - Defines the parameters for the action. Parameters *CacheExpirationActionParameters `json:"parameters,omitempty"` // Name - Possible values include: 'NameDeliveryRuleAction', 'NameURLRedirect', 'NameURLRewrite', 'NameModifyRequestHeader', 'NameModifyResponseHeader', 'NameCacheExpiration', 'NameCacheKeyQueryString' Name NameBasicDeliveryRuleAction `json:"name,omitempty"` } // MarshalJSON is the custom marshaler for DeliveryRuleCacheExpirationAction. func (drcea DeliveryRuleCacheExpirationAction) MarshalJSON() ([]byte, error) { drcea.Name = NameCacheExpiration objectMap := make(map[string]interface{}) if drcea.Parameters != nil { objectMap["parameters"] = drcea.Parameters } if drcea.Name != "" { objectMap["name"] = drcea.Name } return json.Marshal(objectMap) } // AsURLRedirectAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheExpirationAction. func (drcea DeliveryRuleCacheExpirationAction) AsURLRedirectAction() (*URLRedirectAction, bool) { return nil, false } // AsURLRewriteAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheExpirationAction. func (drcea DeliveryRuleCacheExpirationAction) AsURLRewriteAction() (*URLRewriteAction, bool) { return nil, false } // AsDeliveryRuleRequestHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheExpirationAction. 
func (drcea DeliveryRuleCacheExpirationAction) AsDeliveryRuleRequestHeaderAction() (*DeliveryRuleRequestHeaderAction, bool) { return nil, false } // AsDeliveryRuleResponseHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheExpirationAction. func (drcea DeliveryRuleCacheExpirationAction) AsDeliveryRuleResponseHeaderAction() (*DeliveryRuleResponseHeaderAction, bool) { return nil, false } // AsDeliveryRuleCacheExpirationAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheExpirationAction. func (drcea DeliveryRuleCacheExpirationAction) AsDeliveryRuleCacheExpirationAction() (*DeliveryRuleCacheExpirationAction, bool) { return &drcea, true } // AsDeliveryRuleCacheKeyQueryStringAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheExpirationAction. func (drcea DeliveryRuleCacheExpirationAction) AsDeliveryRuleCacheKeyQueryStringAction() (*DeliveryRuleCacheKeyQueryStringAction, bool) { return nil, false } // AsDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheExpirationAction. func (drcea DeliveryRuleCacheExpirationAction) AsDeliveryRuleAction() (*DeliveryRuleAction, bool) { return nil, false } // AsBasicDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheExpirationAction. func (drcea DeliveryRuleCacheExpirationAction) AsBasicDeliveryRuleAction() (BasicDeliveryRuleAction, bool) { return &drcea, true } // DeliveryRuleCacheKeyQueryStringAction defines the cache-key query string action for the delivery rule. type DeliveryRuleCacheKeyQueryStringAction struct { // Parameters - Defines the parameters for the action. Parameters *CacheKeyQueryStringActionParameters `json:"parameters,omitempty"` // Name - Possible values include: 'NameDeliveryRuleAction', 'NameURLRedirect', 'NameURLRewrite', 'NameModifyRequestHeader', 'NameModifyResponseHeader', 'NameCacheExpiration', 'NameCacheKeyQueryString' Name NameBasicDeliveryRuleAction `json:"name,omitempty"` } // MarshalJSON is the custom marshaler for DeliveryRuleCacheKeyQueryStringAction. func (drckqsa DeliveryRuleCacheKeyQueryStringAction) MarshalJSON() ([]byte, error) { drckqsa.Name = NameCacheKeyQueryString objectMap := make(map[string]interface{}) if drckqsa.Parameters != nil { objectMap["parameters"] = drckqsa.Parameters } if drckqsa.Name != "" { objectMap["name"] = drckqsa.Name } return json.Marshal(objectMap) } // AsURLRedirectAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheKeyQueryStringAction. func (drckqsa DeliveryRuleCacheKeyQueryStringAction) AsURLRedirectAction() (*URLRedirectAction, bool) { return nil, false } // AsURLRewriteAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheKeyQueryStringAction. func (drckqsa DeliveryRuleCacheKeyQueryStringAction) AsURLRewriteAction() (*URLRewriteAction, bool) { return nil, false } // AsDeliveryRuleRequestHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheKeyQueryStringAction. func (drckqsa DeliveryRuleCacheKeyQueryStringAction) AsDeliveryRuleRequestHeaderAction() (*DeliveryRuleRequestHeaderAction, bool) { return nil, false } // AsDeliveryRuleResponseHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheKeyQueryStringAction. 
func (drckqsa DeliveryRuleCacheKeyQueryStringAction) AsDeliveryRuleResponseHeaderAction() (*DeliveryRuleResponseHeaderAction, bool) { return nil, false } // AsDeliveryRuleCacheExpirationAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheKeyQueryStringAction. func (drckqsa DeliveryRuleCacheKeyQueryStringAction) AsDeliveryRuleCacheExpirationAction() (*DeliveryRuleCacheExpirationAction, bool) { return nil, false } // AsDeliveryRuleCacheKeyQueryStringAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheKeyQueryStringAction. func (drckqsa DeliveryRuleCacheKeyQueryStringAction) AsDeliveryRuleCacheKeyQueryStringAction() (*DeliveryRuleCacheKeyQueryStringAction, bool) { return &drckqsa, true } // AsDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheKeyQueryStringAction. func (drckqsa DeliveryRuleCacheKeyQueryStringAction) AsDeliveryRuleAction() (*DeliveryRuleAction, bool) { return nil, false } // AsBasicDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleCacheKeyQueryStringAction. func (drckqsa DeliveryRuleCacheKeyQueryStringAction) AsBasicDeliveryRuleAction() (BasicDeliveryRuleAction, bool) { return &drckqsa, true } // BasicDeliveryRuleCondition a condition for the delivery rule. type BasicDeliveryRuleCondition interface { AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) } // DeliveryRuleCondition a condition for the delivery rule. 
type DeliveryRuleCondition struct { // Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice' Name Name `json:"name,omitempty"` } func unmarshalBasicDeliveryRuleCondition(body []byte) (BasicDeliveryRuleCondition, error) { var m map[string]interface{} err := json.Unmarshal(body, &m) if err != nil { return nil, err } switch m["name"] { case string(NameRemoteAddress): var drrac DeliveryRuleRemoteAddressCondition err := json.Unmarshal(body, &drrac) return drrac, err case string(NameRequestMethod): var drrmc DeliveryRuleRequestMethodCondition err := json.Unmarshal(body, &drrmc) return drrmc, err case string(NameQueryString): var drqsc DeliveryRuleQueryStringCondition err := json.Unmarshal(body, &drqsc) return drqsc, err case string(NamePostArgs): var drpac DeliveryRulePostArgsCondition err := json.Unmarshal(body, &drpac) return drpac, err case string(NameRequestURI): var drruc DeliveryRuleRequestURICondition err := json.Unmarshal(body, &drruc) return drruc, err case string(NameRequestHeader): var drrhc DeliveryRuleRequestHeaderCondition err := json.Unmarshal(body, &drrhc) return drrhc, err case string(NameRequestBody): var drrbc DeliveryRuleRequestBodyCondition err := json.Unmarshal(body, &drrbc) return drrbc, err case string(NameRequestScheme): var drrsc DeliveryRuleRequestSchemeCondition err := json.Unmarshal(body, &drrsc) return drrsc, err case string(NameURLPath): var drupc DeliveryRuleURLPathCondition err := json.Unmarshal(body, &drupc) return drupc, err case string(NameURLFileExtension): var drufec DeliveryRuleURLFileExtensionCondition err := json.Unmarshal(body, &drufec) return drufec, err case string(NameURLFileName): var drufnc DeliveryRuleURLFileNameCondition err := json.Unmarshal(body, &drufnc) return drufnc, err case string(NameHTTPVersion): var drhvc DeliveryRuleHTTPVersionCondition err := json.Unmarshal(body, &drhvc) return drhvc, err case string(NameCookies): var drcc DeliveryRuleCookiesCondition err := json.Unmarshal(body, &drcc) return drcc, err case string(NameIsDevice): var dridc DeliveryRuleIsDeviceCondition err := json.Unmarshal(body, &dridc) return dridc, err default: var drc DeliveryRuleCondition err := json.Unmarshal(body, &drc) return drc, err } } func unmarshalBasicDeliveryRuleConditionArray(body []byte) ([]BasicDeliveryRuleCondition, error) { var rawMessages []*json.RawMessage err := json.Unmarshal(body, &rawMessages) if err != nil { return nil, err } drcArray := make([]BasicDeliveryRuleCondition, len(rawMessages)) for index, rawMessage := range rawMessages { drc, err := unmarshalBasicDeliveryRuleCondition(*rawMessage) if err != nil { return nil, err } drcArray[index] = drc } return drcArray, nil } // MarshalJSON is the custom marshaler for DeliveryRuleCondition. func (drc DeliveryRuleCondition) MarshalJSON() ([]byte, error) { drc.Name = NameDeliveryRuleCondition objectMap := make(map[string]interface{}) if drc.Name != "" { objectMap["name"] = drc.Name } return json.Marshal(objectMap) } // AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. 
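// Only the accessor matching the concrete type (plus the AsBasic* catch-all)
// reports true, so consumers typically branch like this (illustrative sketch):
//
//	if upc, ok := cond.AsDeliveryRuleURLPathCondition(); ok {
//		_ = upc.Parameters
//	}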
func (drc DeliveryRuleCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false } // AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false } // AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false } // AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false } // AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false } // AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false } // AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false } // AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false } // AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false } // AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false } // AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false } // AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false } // AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false } // AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false } // AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. 
func (drc DeliveryRuleCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return &drc, true } // AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCondition. func (drc DeliveryRuleCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drc, true } // DeliveryRuleCookiesCondition defines the Cookies condition for the delivery rule. type DeliveryRuleCookiesCondition struct { // Parameters - Defines the parameters for the condition. Parameters *CookiesMatchConditionParameters `json:"parameters,omitempty"` // Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice' Name Name `json:"name,omitempty"` } // MarshalJSON is the custom marshaler for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) MarshalJSON() ([]byte, error) { drcc.Name = NameCookies objectMap := make(map[string]interface{}) if drcc.Parameters != nil { objectMap["parameters"] = drcc.Parameters } if drcc.Name != "" { objectMap["name"] = drcc.Name } return json.Marshal(objectMap) } // AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false } // AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false } // AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false } // AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false } // AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false } // AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false } // AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false } // AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. 
func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false } // AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false } // AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false } // AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false } // AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false } // AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return &drcc, true } // AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false } // AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false } // AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleCookiesCondition. func (drcc DeliveryRuleCookiesCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drcc, true } // DeliveryRuleHTTPVersionCondition defines the HttpVersion condition for the delivery rule. type DeliveryRuleHTTPVersionCondition struct { // Parameters - Defines the parameters for the condition. Parameters *HTTPVersionMatchConditionParameters `json:"parameters,omitempty"` // Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice' Name Name `json:"name,omitempty"` } // MarshalJSON is the custom marshaler for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) MarshalJSON() ([]byte, error) { drhvc.Name = NameHTTPVersion objectMap := make(map[string]interface{}) if drhvc.Parameters != nil { objectMap["parameters"] = drhvc.Parameters } if drhvc.Name != "" { objectMap["name"] = drhvc.Name } return json.Marshal(objectMap) } // AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. 
func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false } // AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false } // AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false } // AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false } // AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false } // AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false } // AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false } // AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false } // AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false } // AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false } // AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false } // AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return &drhvc, true } // AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false } // AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. 
func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false } // AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false } // AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleHTTPVersionCondition. func (drhvc DeliveryRuleHTTPVersionCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drhvc, true } // DeliveryRuleIsDeviceCondition defines the IsDevice condition for the delivery rule. type DeliveryRuleIsDeviceCondition struct { // Parameters - Defines the parameters for the condition. Parameters *IsDeviceMatchConditionParameters `json:"parameters,omitempty"` // Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice' Name Name `json:"name,omitempty"` } // MarshalJSON is the custom marshaler for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) MarshalJSON() ([]byte, error) { dridc.Name = NameIsDevice objectMap := make(map[string]interface{}) if dridc.Parameters != nil { objectMap["parameters"] = dridc.Parameters } if dridc.Name != "" { objectMap["name"] = dridc.Name } return json.Marshal(objectMap) } // AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false } // AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false } // AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false } // AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false } // AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false } // AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false } // AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. 
func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false } // AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false } // AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false } // AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false } // AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false } // AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false } // AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false } // AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return &dridc, true } // AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false } // AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleIsDeviceCondition. func (dridc DeliveryRuleIsDeviceCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &dridc, true } // DeliveryRulePostArgsCondition defines the PostArgs condition for the delivery rule. type DeliveryRulePostArgsCondition struct { // Parameters - Defines the parameters for the condition. Parameters *PostArgsMatchConditionParameters `json:"parameters,omitempty"` // Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice' Name Name `json:"name,omitempty"` } // MarshalJSON is the custom marshaler for DeliveryRulePostArgsCondition. 
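// Because the marshaler pins Name to NamePostArgs, a hand-built condition only
// needs Parameters populated before it is placed in DeliveryRule.Conditions.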
func (drpac DeliveryRulePostArgsCondition) MarshalJSON() ([]byte, error) { drpac.Name = NamePostArgs objectMap := make(map[string]interface{}) if drpac.Parameters != nil { objectMap["parameters"] = drpac.Parameters } if drpac.Name != "" { objectMap["name"] = drpac.Name } return json.Marshal(objectMap) } // AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false } // AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false } // AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false } // AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return &drpac, true } // AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false } // AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false } // AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false } // AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false } // AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false } // AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false } // AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false } // AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition. 
func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition.
func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition.
func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition.
func (drpac DeliveryRulePostArgsCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRulePostArgsCondition.
func (drpac DeliveryRulePostArgsCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drpac, true }

// DeliveryRuleQueryStringCondition defines the QueryString condition for the delivery rule.
type DeliveryRuleQueryStringCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *QueryStringMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) MarshalJSON() ([]byte, error) {
	drqsc.Name = NameQueryString
	objectMap := make(map[string]interface{})
	if drqsc.Parameters != nil {
		objectMap["parameters"] = drqsc.Parameters
	}
	if drqsc.Name != "" {
		objectMap["name"] = drqsc.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return &drqsc, true }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleQueryStringCondition.
func (drqsc DeliveryRuleQueryStringCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drqsc, true }
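// NOTE (editorial, illustrative sketch only): values that implement
// BasicDeliveryRuleCondition can be narrowed back to a concrete condition with
// the generated As* helpers; the variable names below are hypothetical:
//
//	var basic BasicDeliveryRuleCondition = DeliveryRuleQueryStringCondition{}
//	if qs, ok := basic.AsDeliveryRuleQueryStringCondition(); ok {
//		_ = qs // *DeliveryRuleQueryStringCondition
//	}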
// DeliveryRuleRemoteAddressCondition defines the RemoteAddress condition for the delivery rule.
type DeliveryRuleRemoteAddressCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *RemoteAddressMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) MarshalJSON() ([]byte, error) {
	drrac.Name = NameRemoteAddress
	objectMap := make(map[string]interface{})
	if drrac.Parameters != nil {
		objectMap["parameters"] = drrac.Parameters
	}
	if drrac.Name != "" {
		objectMap["name"] = drrac.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return &drrac, true }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRemoteAddressCondition.
func (drrac DeliveryRuleRemoteAddressCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drrac, true }

// DeliveryRuleRequestBodyCondition defines the RequestBody condition for the delivery rule.
type DeliveryRuleRequestBodyCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *RequestBodyMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) MarshalJSON() ([]byte, error) {
	drrbc.Name = NameRequestBody
	objectMap := make(map[string]interface{})
	if drrbc.Parameters != nil {
		objectMap["parameters"] = drrbc.Parameters
	}
	if drrbc.Name != "" {
		objectMap["name"] = drrbc.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return &drrbc, true }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestBodyCondition.
func (drrbc DeliveryRuleRequestBodyCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drrbc, true }

// DeliveryRuleRequestHeaderAction defines the request header action for the delivery rule.
type DeliveryRuleRequestHeaderAction struct {
	// Parameters - Defines the parameters for the action.
	Parameters *HeaderActionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleAction', 'NameURLRedirect', 'NameURLRewrite', 'NameModifyRequestHeader', 'NameModifyResponseHeader', 'NameCacheExpiration', 'NameCacheKeyQueryString'
	Name NameBasicDeliveryRuleAction `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) MarshalJSON() ([]byte, error) {
	drrha.Name = NameModifyRequestHeader
	objectMap := make(map[string]interface{})
	if drrha.Parameters != nil {
		objectMap["parameters"] = drrha.Parameters
	}
	if drrha.Name != "" {
		objectMap["name"] = drrha.Name
	}
	return json.Marshal(objectMap)
}

// AsURLRedirectAction is the BasicDeliveryRuleAction implementation for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) AsURLRedirectAction() (*URLRedirectAction, bool) { return nil, false }

// AsURLRewriteAction is the BasicDeliveryRuleAction implementation for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) AsURLRewriteAction() (*URLRewriteAction, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) AsDeliveryRuleRequestHeaderAction() (*DeliveryRuleRequestHeaderAction, bool) { return &drrha, true }

// AsDeliveryRuleResponseHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) AsDeliveryRuleResponseHeaderAction() (*DeliveryRuleResponseHeaderAction, bool) { return nil, false }

// AsDeliveryRuleCacheExpirationAction is the BasicDeliveryRuleAction implementation for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) AsDeliveryRuleCacheExpirationAction() (*DeliveryRuleCacheExpirationAction, bool) { return nil, false }

// AsDeliveryRuleCacheKeyQueryStringAction is the BasicDeliveryRuleAction implementation for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) AsDeliveryRuleCacheKeyQueryStringAction() (*DeliveryRuleCacheKeyQueryStringAction, bool) { return nil, false }

// AsDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) AsDeliveryRuleAction() (*DeliveryRuleAction, bool) { return nil, false }

// AsBasicDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleRequestHeaderAction.
func (drrha DeliveryRuleRequestHeaderAction) AsBasicDeliveryRuleAction() (BasicDeliveryRuleAction, bool) { return &drrha, true }
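// NOTE (editorial, illustrative sketch only): delivery rule actions are
// discriminated the same way as conditions, but on NameBasicDeliveryRuleAction;
// marshaling the action above pins "name" to NameModifyRequestHeader:
//
//	act := DeliveryRuleRequestHeaderAction{Parameters: &HeaderActionParameters{}}
//	b, _ := json.Marshal(act) // discriminator set by the custom marshaler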
// DeliveryRuleRequestHeaderCondition defines the RequestHeader condition for the delivery rule.
type DeliveryRuleRequestHeaderCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *RequestHeaderMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) MarshalJSON() ([]byte, error) {
	drrhc.Name = NameRequestHeader
	objectMap := make(map[string]interface{})
	if drrhc.Parameters != nil {
		objectMap["parameters"] = drrhc.Parameters
	}
	if drrhc.Name != "" {
		objectMap["name"] = drrhc.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return &drrhc, true }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestHeaderCondition.
func (drrhc DeliveryRuleRequestHeaderCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drrhc, true }

// DeliveryRuleRequestMethodCondition defines the RequestMethod condition for the delivery rule.
type DeliveryRuleRequestMethodCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *RequestMethodMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) MarshalJSON() ([]byte, error) {
	drrmc.Name = NameRequestMethod
	objectMap := make(map[string]interface{})
	if drrmc.Parameters != nil {
		objectMap["parameters"] = drrmc.Parameters
	}
	if drrmc.Name != "" {
		objectMap["name"] = drrmc.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return &drrmc, true }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestMethodCondition.
func (drrmc DeliveryRuleRequestMethodCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drrmc, true }

// DeliveryRuleRequestSchemeCondition defines the RequestScheme condition for the delivery rule.
type DeliveryRuleRequestSchemeCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *RequestSchemeMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) MarshalJSON() ([]byte, error) {
	drrsc.Name = NameRequestScheme
	objectMap := make(map[string]interface{})
	if drrsc.Parameters != nil {
		objectMap["parameters"] = drrsc.Parameters
	}
	if drrsc.Name != "" {
		objectMap["name"] = drrsc.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return &drrsc, true }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestSchemeCondition.
func (drrsc DeliveryRuleRequestSchemeCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drrsc, true }

// DeliveryRuleRequestURICondition defines the RequestUri condition for the delivery rule.
type DeliveryRuleRequestURICondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *RequestURIMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) MarshalJSON() ([]byte, error) {
	drruc.Name = NameRequestURI
	objectMap := make(map[string]interface{})
	if drruc.Parameters != nil {
		objectMap["parameters"] = drruc.Parameters
	}
	if drruc.Name != "" {
		objectMap["name"] = drruc.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return &drruc, true }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleRequestURICondition.
func (drruc DeliveryRuleRequestURICondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drruc, true }

// DeliveryRuleResponseHeaderAction defines the response header action for the delivery rule.
type DeliveryRuleResponseHeaderAction struct {
	// Parameters - Defines the parameters for the action.
	Parameters *HeaderActionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleAction', 'NameURLRedirect', 'NameURLRewrite', 'NameModifyRequestHeader', 'NameModifyResponseHeader', 'NameCacheExpiration', 'NameCacheKeyQueryString'
	Name NameBasicDeliveryRuleAction `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) MarshalJSON() ([]byte, error) {
	drrha.Name = NameModifyResponseHeader
	objectMap := make(map[string]interface{})
	if drrha.Parameters != nil {
		objectMap["parameters"] = drrha.Parameters
	}
	if drrha.Name != "" {
		objectMap["name"] = drrha.Name
	}
	return json.Marshal(objectMap)
}

// AsURLRedirectAction is the BasicDeliveryRuleAction implementation for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) AsURLRedirectAction() (*URLRedirectAction, bool) { return nil, false }

// AsURLRewriteAction is the BasicDeliveryRuleAction implementation for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) AsURLRewriteAction() (*URLRewriteAction, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) AsDeliveryRuleRequestHeaderAction() (*DeliveryRuleRequestHeaderAction, bool) { return nil, false }

// AsDeliveryRuleResponseHeaderAction is the BasicDeliveryRuleAction implementation for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) AsDeliveryRuleResponseHeaderAction() (*DeliveryRuleResponseHeaderAction, bool) { return &drrha, true }

// AsDeliveryRuleCacheExpirationAction is the BasicDeliveryRuleAction implementation for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) AsDeliveryRuleCacheExpirationAction() (*DeliveryRuleCacheExpirationAction, bool) { return nil, false }

// AsDeliveryRuleCacheKeyQueryStringAction is the BasicDeliveryRuleAction implementation for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) AsDeliveryRuleCacheKeyQueryStringAction() (*DeliveryRuleCacheKeyQueryStringAction, bool) { return nil, false }

// AsDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) AsDeliveryRuleAction() (*DeliveryRuleAction, bool) { return nil, false }

// AsBasicDeliveryRuleAction is the BasicDeliveryRuleAction implementation for DeliveryRuleResponseHeaderAction.
func (drrha DeliveryRuleResponseHeaderAction) AsBasicDeliveryRuleAction() (BasicDeliveryRuleAction, bool) { return &drrha, true }
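// NOTE (editorial, illustrative sketch only): instead of chaining the As*
// helpers, a type switch over a hypothetical basicAction variable (of type
// BasicDeliveryRuleAction holding a value) reaches the same concrete types:
//
//	switch a := basicAction.(type) {
//	case DeliveryRuleRequestHeaderAction:
//		_ = a // request-header action
//	case DeliveryRuleResponseHeaderAction:
//		_ = a // response-header action
//	}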
// DeliveryRuleURLFileExtensionCondition defines the UrlFileExtension condition for the delivery rule.
type DeliveryRuleURLFileExtensionCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *URLFileExtensionMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) MarshalJSON() ([]byte, error) {
	drufec.Name = NameURLFileExtension
	objectMap := make(map[string]interface{})
	if drufec.Parameters != nil {
		objectMap["parameters"] = drufec.Parameters
	}
	if drufec.Name != "" {
		objectMap["name"] = drufec.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return &drufec, true }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileExtensionCondition.
func (drufec DeliveryRuleURLFileExtensionCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drufec, true }

// DeliveryRuleURLFileNameCondition defines the UrlFileName condition for the delivery rule.
type DeliveryRuleURLFileNameCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *URLFileNameMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) MarshalJSON() ([]byte, error) {
	drufnc.Name = NameURLFileName
	objectMap := make(map[string]interface{})
	if drufnc.Parameters != nil {
		objectMap["parameters"] = drufnc.Parameters
	}
	if drufnc.Name != "" {
		objectMap["name"] = drufnc.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false }

// AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return &drufnc, true }

// AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false }

// AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false }

// AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false }

// AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false }

// AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLFileNameCondition.
func (drufnc DeliveryRuleURLFileNameCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drufnc, true }

// DeliveryRuleURLPathCondition defines the UrlPath condition for the delivery rule.
type DeliveryRuleURLPathCondition struct {
	// Parameters - Defines the parameters for the condition.
	Parameters *URLPathMatchConditionParameters `json:"parameters,omitempty"`
	// Name - Possible values include: 'NameDeliveryRuleCondition', 'NameRemoteAddress', 'NameRequestMethod', 'NameQueryString', 'NamePostArgs', 'NameRequestURI', 'NameRequestHeader', 'NameRequestBody', 'NameRequestScheme', 'NameURLPath', 'NameURLFileExtension', 'NameURLFileName', 'NameHTTPVersion', 'NameCookies', 'NameIsDevice'
	Name Name `json:"name,omitempty"`
}

// MarshalJSON is the custom marshaler for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) MarshalJSON() ([]byte, error) {
	drupc.Name = NameURLPath
	objectMap := make(map[string]interface{})
	if drupc.Parameters != nil {
		objectMap["parameters"] = drupc.Parameters
	}
	if drupc.Name != "" {
		objectMap["name"] = drupc.Name
	}
	return json.Marshal(objectMap)
}

// AsDeliveryRuleRemoteAddressCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleRemoteAddressCondition() (*DeliveryRuleRemoteAddressCondition, bool) { return nil, false }

// AsDeliveryRuleRequestMethodCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleRequestMethodCondition() (*DeliveryRuleRequestMethodCondition, bool) { return nil, false }

// AsDeliveryRuleQueryStringCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleQueryStringCondition() (*DeliveryRuleQueryStringCondition, bool) { return nil, false }

// AsDeliveryRulePostArgsCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRulePostArgsCondition() (*DeliveryRulePostArgsCondition, bool) { return nil, false }

// AsDeliveryRuleRequestURICondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleRequestURICondition() (*DeliveryRuleRequestURICondition, bool) { return nil, false }

// AsDeliveryRuleRequestHeaderCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleRequestHeaderCondition() (*DeliveryRuleRequestHeaderCondition, bool) { return nil, false }

// AsDeliveryRuleRequestBodyCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleRequestBodyCondition() (*DeliveryRuleRequestBodyCondition, bool) { return nil, false }

// AsDeliveryRuleRequestSchemeCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleRequestSchemeCondition() (*DeliveryRuleRequestSchemeCondition, bool) { return nil, false }

// AsDeliveryRuleURLPathCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition.
func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleURLPathCondition() (*DeliveryRuleURLPathCondition, bool) { return &drupc, true } // AsDeliveryRuleURLFileExtensionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition. func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleURLFileExtensionCondition() (*DeliveryRuleURLFileExtensionCondition, bool) { return nil, false } // AsDeliveryRuleURLFileNameCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition. func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleURLFileNameCondition() (*DeliveryRuleURLFileNameCondition, bool) { return nil, false } // AsDeliveryRuleHTTPVersionCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition. func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleHTTPVersionCondition() (*DeliveryRuleHTTPVersionCondition, bool) { return nil, false } // AsDeliveryRuleCookiesCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition. func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleCookiesCondition() (*DeliveryRuleCookiesCondition, bool) { return nil, false } // AsDeliveryRuleIsDeviceCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition. func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleIsDeviceCondition() (*DeliveryRuleIsDeviceCondition, bool) { return nil, false } // AsDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition. func (drupc DeliveryRuleURLPathCondition) AsDeliveryRuleCondition() (*DeliveryRuleCondition, bool) { return nil, false } // AsBasicDeliveryRuleCondition is the BasicDeliveryRuleCondition implementation for DeliveryRuleURLPathCondition. func (drupc DeliveryRuleURLPathCondition) AsBasicDeliveryRuleCondition() (BasicDeliveryRuleCondition, bool) { return &drupc, true } // EdgeNode edgenode is a global Point of Presence (POP) location used to deliver CDN content to end users. type EdgeNode struct { *EdgeNodeProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource ID. ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name. Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type. Type *string `json:"type,omitempty"` } // MarshalJSON is the custom marshaler for EdgeNode. func (en EdgeNode) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if en.EdgeNodeProperties != nil { objectMap["properties"] = en.EdgeNodeProperties } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for EdgeNode struct. func (en *EdgeNode) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "properties": if v != nil { var edgeNodeProperties EdgeNodeProperties err = json.Unmarshal(*v, &edgeNodeProperties) if err != nil { return err } en.EdgeNodeProperties = &edgeNodeProperties } case "id": if v != nil { var ID string err = json.Unmarshal(*v, &ID) if err != nil { return err } en.ID = &ID } case "name": if v != nil { var name string err = json.Unmarshal(*v, &name) if err != nil { return err } en.Name = &name } case "type": if v != nil { var typeVar string err = json.Unmarshal(*v, &typeVar) if err != nil { return err } en.Type = &typeVar } } } return nil } // EdgeNodeProperties the JSON object that contains the properties required to create an edgenode. 
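
// Round-trip sketch (illustrative; the JSON payload is hypothetical): the
// custom (un)marshalers exist because the wire format nests mutable state
// under "properties" while the Go type flattens it, and because READ-ONLY
// fields (ID, Name, Type) must be accepted on read but never sent on write.
//
//	var en EdgeNode
//	err := json.Unmarshal([]byte(`{"name":"node1","properties":{"ipAddressGroups":[]}}`), &en)
//	// en.Name and en.EdgeNodeProperties are populated; marshaling en back
//	// emits only "properties", dropping the READ-ONLY envelope fields.
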
type EdgeNodeProperties struct { // IPAddressGroups - List of ip address groups. IPAddressGroups *[]IPAddressGroup `json:"ipAddressGroups,omitempty"` } // EdgenodeResult result of the request to list CDN edgenodes. It contains a list of ip address group and a URL // link to get the next set of results. type EdgenodeResult struct { autorest.Response `json:"-"` // Value - READ-ONLY; Edge node of CDN service. Value *[]EdgeNode `json:"value,omitempty"` // NextLink - URL to get the next set of edgenode list results if there are any. NextLink *string `json:"nextLink,omitempty"` } // MarshalJSON is the custom marshaler for EdgenodeResult. func (er EdgenodeResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if er.NextLink != nil { objectMap["nextLink"] = er.NextLink } return json.Marshal(objectMap) } // EdgenodeResultIterator provides access to a complete listing of EdgeNode values. type EdgenodeResultIterator struct { i int page EdgenodeResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. func (iter *EdgenodeResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/EdgenodeResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { sc = iter.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } iter.i++ if iter.i < len(iter.page.Values()) { return nil } err = iter.page.NextWithContext(ctx) if err != nil { iter.i-- return err } iter.i = 0 return nil } // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (iter *EdgenodeResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. func (iter EdgenodeResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. func (iter EdgenodeResultIterator) Response() EdgenodeResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. func (iter EdgenodeResultIterator) Value() EdgeNode { if !iter.page.NotDone() { return EdgeNode{} } return iter.page.Values()[iter.i] } // Creates a new instance of the EdgenodeResultIterator type. func NewEdgenodeResultIterator(page EdgenodeResultPage) EdgenodeResultIterator { return EdgenodeResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. func (er EdgenodeResult) IsEmpty() bool { return er.Value == nil || len(*er.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. func (er EdgenodeResult) hasNextLink() bool { return er.NextLink != nil && len(*er.NextLink) != 0 } // edgenodeResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (er EdgenodeResult) edgenodeResultPreparer(ctx context.Context) (*http.Request, error) { if !er.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), autorest.WithBaseURL(to.String(er.NextLink))) } // EdgenodeResultPage contains a page of EdgeNode values. 
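
// Iteration sketch (illustrative): EdgenodeResultIterator flattens server-side
// paging into a single stream. The iterator is typically obtained from the
// generated EdgeNodesClient.ListComplete helper; that name is an assumption
// here, as are ctx and the surrounding error handling.
//
//	for iter.NotDone() {
//		node := iter.Value()
//		fmt.Println(to.String(node.Name))
//		if err := iter.NextWithContext(ctx); err != nil {
//			return err
//		}
//	}
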
type EdgenodeResultPage struct { fn func(context.Context, EdgenodeResult) (EdgenodeResult, error) er EdgenodeResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. func (page *EdgenodeResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/EdgenodeResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { sc = page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } for { next, err := page.fn(ctx, page.er) if err != nil { return err } page.er = next if !next.hasNextLink() || !next.IsEmpty() { break } } return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (page *EdgenodeResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. func (page EdgenodeResultPage) NotDone() bool { return !page.er.IsEmpty() } // Response returns the raw server response from the last page request. func (page EdgenodeResultPage) Response() EdgenodeResult { return page.er } // Values returns the slice of values for the current page or nil if there are no values. func (page EdgenodeResultPage) Values() []EdgeNode { if page.er.IsEmpty() { return nil } return *page.er.Value } // Creates a new instance of the EdgenodeResultPage type. func NewEdgenodeResultPage(getNextPage func(context.Context, EdgenodeResult) (EdgenodeResult, error)) EdgenodeResultPage { return EdgenodeResultPage{fn: getNextPage} } // Endpoint CDN endpoint is the entity within a CDN profile containing configuration information such as // origin, protocol, content caching and delivery behavior. The CDN endpoint uses the URL format // <endpointname>.azureedge.net. type Endpoint struct { autorest.Response `json:"-"` *EndpointProperties `json:"properties,omitempty"` // Location - Resource location. Location *string `json:"location,omitempty"` // Tags - Resource tags. Tags map[string]*string `json:"tags"` // ID - READ-ONLY; Resource ID. ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name. Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type. Type *string `json:"type,omitempty"` } // MarshalJSON is the custom marshaler for Endpoint. func (e Endpoint) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if e.EndpointProperties != nil { objectMap["properties"] = e.EndpointProperties } if e.Location != nil { objectMap["location"] = e.Location } if e.Tags != nil { objectMap["tags"] = e.Tags } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for Endpoint struct. 
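
// Test-stub sketch (illustrative): NewEdgenodeResultPage, defined above, makes
// it possible to fake paging without HTTP. The stub below serves one page and
// then reports the end of the collection by returning an empty result.
//
//	nodes := []EdgeNode{{Name: to.StringPtr("node1")}}
//	page := NewEdgenodeResultPage(func(ctx context.Context, er EdgenodeResult) (EdgenodeResult, error) {
//		if er.Value != nil {
//			return EdgenodeResult{}, nil // first page already served; stop
//		}
//		return EdgenodeResult{Value: &nodes}, nil
//	})
//	_ = page.NextWithContext(context.Background()) // loads the single stub page
//	_ = page.Values()                              // [node1]
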
func (e *Endpoint) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "properties": if v != nil { var endpointProperties EndpointProperties err = json.Unmarshal(*v, &endpointProperties) if err != nil { return err } e.EndpointProperties = &endpointProperties } case "location": if v != nil { var location string err = json.Unmarshal(*v, &location) if err != nil { return err } e.Location = &location } case "tags": if v != nil { var tags map[string]*string err = json.Unmarshal(*v, &tags) if err != nil { return err } e.Tags = tags } case "id": if v != nil { var ID string err = json.Unmarshal(*v, &ID) if err != nil { return err } e.ID = &ID } case "name": if v != nil { var name string err = json.Unmarshal(*v, &name) if err != nil { return err } e.Name = &name } case "type": if v != nil { var typeVar string err = json.Unmarshal(*v, &typeVar) if err != nil { return err } e.Type = &typeVar } } } return nil } // EndpointListResult result of the request to list endpoints. It contains a list of endpoint objects and a URL // link to get the next set of results. type EndpointListResult struct { autorest.Response `json:"-"` // Value - READ-ONLY; List of CDN endpoints within a profile Value *[]Endpoint `json:"value,omitempty"` // NextLink - URL to get the next set of endpoint objects if there is any. NextLink *string `json:"nextLink,omitempty"` } // MarshalJSON is the custom marshaler for EndpointListResult. func (elr EndpointListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if elr.NextLink != nil { objectMap["nextLink"] = elr.NextLink } return json.Marshal(objectMap) } // EndpointListResultIterator provides access to a complete listing of Endpoint values. type EndpointListResultIterator struct { i int page EndpointListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. func (iter *EndpointListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/EndpointListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { sc = iter.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } iter.i++ if iter.i < len(iter.page.Values()) { return nil } err = iter.page.NextWithContext(ctx) if err != nil { iter.i-- return err } iter.i = 0 return nil } // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (iter *EndpointListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. func (iter EndpointListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. func (iter EndpointListResultIterator) Response() EndpointListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. 
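
// Decoding sketch (illustrative; the JSON payload is hypothetical): like the
// other tracked resources in this package, Endpoint flattens the wire-level
// "properties" object onto the embedded EndpointProperties struct during
// UnmarshalJSON.
//
//	var e Endpoint
//	err := json.Unmarshal([]byte(`{"location":"WestUs","properties":{"isHttpAllowed":true}}`), &e)
//	// e.Location and e.EndpointProperties.IsHTTPAllowed are both populated.
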
func (iter EndpointListResultIterator) Value() Endpoint { if !iter.page.NotDone() { return Endpoint{} } return iter.page.Values()[iter.i] } // Creates a new instance of the EndpointListResultIterator type. func NewEndpointListResultIterator(page EndpointListResultPage) EndpointListResultIterator { return EndpointListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. func (elr EndpointListResult) IsEmpty() bool { return elr.Value == nil || len(*elr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. func (elr EndpointListResult) hasNextLink() bool { return elr.NextLink != nil && len(*elr.NextLink) != 0 } // endpointListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (elr EndpointListResult) endpointListResultPreparer(ctx context.Context) (*http.Request, error) { if !elr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), autorest.WithBaseURL(to.String(elr.NextLink))) } // EndpointListResultPage contains a page of Endpoint values. type EndpointListResultPage struct { fn func(context.Context, EndpointListResult) (EndpointListResult, error) elr EndpointListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. func (page *EndpointListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/EndpointListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { sc = page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } for { next, err := page.fn(ctx, page.elr) if err != nil { return err } page.elr = next if !next.hasNextLink() || !next.IsEmpty() { break } } return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (page *EndpointListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. func (page EndpointListResultPage) NotDone() bool { return !page.elr.IsEmpty() } // Response returns the raw server response from the last page request. func (page EndpointListResultPage) Response() EndpointListResult { return page.elr } // Values returns the slice of values for the current page or nil if there are no values. func (page EndpointListResultPage) Values() []Endpoint { if page.elr.IsEmpty() { return nil } return *page.elr.Value } // Creates a new instance of the EndpointListResultPage type. func NewEndpointListResultPage(getNextPage func(context.Context, EndpointListResult) (EndpointListResult, error)) EndpointListResultPage { return EndpointListResultPage{fn: getNextPage} } // EndpointProperties the JSON object that contains the properties required to create an endpoint. type EndpointProperties struct { // HostName - READ-ONLY; The host name of the endpoint structured as {endpointName}.{DNSZone}, e.g. contoso.azureedge.net HostName *string `json:"hostName,omitempty"` // Origins - The source of the content being delivered via CDN. Origins *[]DeepCreatedOrigin `json:"origins,omitempty"` // ResourceState - READ-ONLY; Resource status of the endpoint. 
Possible values include: 'EndpointResourceStateCreating', 'EndpointResourceStateDeleting', 'EndpointResourceStateRunning', 'EndpointResourceStateStarting', 'EndpointResourceStateStopped', 'EndpointResourceStateStopping' ResourceState EndpointResourceState `json:"resourceState,omitempty"` // ProvisioningState - READ-ONLY; Provisioning status of the endpoint. ProvisioningState *string `json:"provisioningState,omitempty"` // OriginHostHeader - The host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default. OriginHostHeader *string `json:"originHostHeader,omitempty"` // OriginPath - A directory path on the origin that CDN can use to retrieve content from, e.g. contoso.cloudapp.net/originpath. OriginPath *string `json:"originPath,omitempty"` // ContentTypesToCompress - List of content types on which compression applies. The value should be a valid MIME type. ContentTypesToCompress *[]string `json:"contentTypesToCompress,omitempty"` // IsCompressionEnabled - Indicates whether content compression is enabled on CDN. Default value is false. If compression is enabled, content will be served as compressed if user requests for a compressed version. Content won't be compressed on CDN when requested content is smaller than 1 byte or larger than 1 MB. IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty"` // IsHTTPAllowed - Indicates whether HTTP traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed. IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty"` // IsHTTPSAllowed - Indicates whether HTTPS traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed. IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty"` // QueryStringCachingBehavior - Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests that contain query strings from being cached, or cache every request with a unique URL. Possible values include: 'IgnoreQueryString', 'BypassCaching', 'UseQueryString', 'NotSet' QueryStringCachingBehavior QueryStringCachingBehavior `json:"queryStringCachingBehavior,omitempty"` // OptimizationType - Specifies what scenario the customer wants this CDN endpoint to optimize for, e.g. Download, Media services. With this information, CDN can apply scenario driven optimization. Possible values include: 'GeneralWebDelivery', 'GeneralMediaStreaming', 'VideoOnDemandMediaStreaming', 'LargeFileDownload', 'DynamicSiteAcceleration' OptimizationType OptimizationType `json:"optimizationType,omitempty"` // ProbePath - Path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the origin path. ProbePath *string `json:"probePath,omitempty"` // GeoFilters - List of rules defining the user's geo access within a CDN endpoint. Each geo filter defines an access rule to a specified path or content, e.g. block APAC for path /pictures/ GeoFilters *[]GeoFilter `json:"geoFilters,omitempty"` // DeliveryPolicy - A policy that specifies the delivery rules to be used for an endpoint. 
DeliveryPolicy *EndpointPropertiesUpdateParametersDeliveryPolicy `json:"deliveryPolicy,omitempty"` } // MarshalJSON is the custom marshaler for EndpointProperties. func (ep EndpointProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if ep.Origins != nil { objectMap["origins"] = ep.Origins } if ep.OriginHostHeader != nil { objectMap["originHostHeader"] = ep.OriginHostHeader } if ep.OriginPath != nil { objectMap["originPath"] = ep.OriginPath } if ep.ContentTypesToCompress != nil { objectMap["contentTypesToCompress"] = ep.ContentTypesToCompress } if ep.IsCompressionEnabled != nil { objectMap["isCompressionEnabled"] = ep.IsCompressionEnabled } if ep.IsHTTPAllowed != nil { objectMap["isHttpAllowed"] = ep.IsHTTPAllowed } if ep.IsHTTPSAllowed != nil { objectMap["isHttpsAllowed"] = ep.IsHTTPSAllowed } if ep.QueryStringCachingBehavior != "" { objectMap["queryStringCachingBehavior"] = ep.QueryStringCachingBehavior } if ep.OptimizationType != "" { objectMap["optimizationType"] = ep.OptimizationType } if ep.ProbePath != nil { objectMap["probePath"] = ep.ProbePath } if ep.GeoFilters != nil { objectMap["geoFilters"] = ep.GeoFilters } if ep.DeliveryPolicy != nil { objectMap["deliveryPolicy"] = ep.DeliveryPolicy } return json.Marshal(objectMap) } // EndpointPropertiesUpdateParameters the JSON object containing endpoint update parameters. type EndpointPropertiesUpdateParameters struct { // OriginHostHeader - The host header value sent to the origin with each request. If you leave this blank, the request hostname determines this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud Services require this host header value to match the origin hostname by default. OriginHostHeader *string `json:"originHostHeader,omitempty"` // OriginPath - A directory path on the origin that CDN can use to retrieve content from, e.g. contoso.cloudapp.net/originpath. OriginPath *string `json:"originPath,omitempty"` // ContentTypesToCompress - List of content types on which compression applies. The value should be a valid MIME type. ContentTypesToCompress *[]string `json:"contentTypesToCompress,omitempty"` // IsCompressionEnabled - Indicates whether content compression is enabled on CDN. Default value is false. If compression is enabled, content will be served as compressed if user requests for a compressed version. Content won't be compressed on CDN when requested content is smaller than 1 byte or larger than 1 MB. IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty"` // IsHTTPAllowed - Indicates whether HTTP traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed. IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty"` // IsHTTPSAllowed - Indicates whether HTTPS traffic is allowed on the endpoint. Default value is true. At least one protocol (HTTP or HTTPS) must be allowed. IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty"` // QueryStringCachingBehavior - Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests that contain query strings from being cached, or cache every request with a unique URL. Possible values include: 'IgnoreQueryString', 'BypassCaching', 'UseQueryString', 'NotSet' QueryStringCachingBehavior QueryStringCachingBehavior `json:"queryStringCachingBehavior,omitempty"` // OptimizationType - Specifies what scenario the customer wants this CDN endpoint to optimize for, e.g. Download, Media services. 
With this information, CDN can apply scenario driven optimization. Possible values include: 'GeneralWebDelivery', 'GeneralMediaStreaming', 'VideoOnDemandMediaStreaming', 'LargeFileDownload', 'DynamicSiteAcceleration' OptimizationType OptimizationType `json:"optimizationType,omitempty"` // ProbePath - Path to a file hosted on the origin which helps accelerate delivery of the dynamic content and calculate the most optimal routes for the CDN. This is relative to the origin path. ProbePath *string `json:"probePath,omitempty"` // GeoFilters - List of rules defining the user's geo access within a CDN endpoint. Each geo filter defines an access rule to a specified path or content, e.g. block APAC for path /pictures/ GeoFilters *[]GeoFilter `json:"geoFilters,omitempty"` // DeliveryPolicy - A policy that specifies the delivery rules to be used for an endpoint. DeliveryPolicy *EndpointPropertiesUpdateParametersDeliveryPolicy `json:"deliveryPolicy,omitempty"` } // EndpointPropertiesUpdateParametersDeliveryPolicy a policy that specifies the delivery rules to be used for // an endpoint. type EndpointPropertiesUpdateParametersDeliveryPolicy struct { // Description - User-friendly description of the policy. Description *string `json:"description,omitempty"` // Rules - A list of the delivery rules. Rules *[]DeliveryRule `json:"rules,omitempty"` } // EndpointsCreateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type EndpointsCreateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. func (future *EndpointsCreateFuture) Result(client EndpointsClient) (e Endpoint, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsCreateFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.EndpointsCreateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if e.Response.Response, err = future.GetResult(sender); err == nil && e.Response.Response.StatusCode != http.StatusNoContent { e, err = client.CreateResponder(e.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsCreateFuture", "Result", e.Response.Response, "Failure responding to request") } } return } // EndpointsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type EndpointsDeleteFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. func (future *EndpointsDeleteFuture) Result(client EndpointsClient) (ar autorest.Response, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.EndpointsDeleteFuture") return } ar.Response = future.Response() return } // EndpointsLoadContentFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type EndpointsLoadContentFuture struct { azure.Future } // Result returns the result of the asynchronous operation. 
// If the operation has not completed it will return an error. func (future *EndpointsLoadContentFuture) Result(client EndpointsClient) (ar autorest.Response, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsLoadContentFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.EndpointsLoadContentFuture") return } ar.Response = future.Response() return } // EndpointsPurgeContentFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type EndpointsPurgeContentFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. func (future *EndpointsPurgeContentFuture) Result(client EndpointsClient) (ar autorest.Response, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsPurgeContentFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.EndpointsPurgeContentFuture") return } ar.Response = future.Response() return } // EndpointsStartFuture an abstraction for monitoring and retrieving the results of a long-running operation. type EndpointsStartFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. func (future *EndpointsStartFuture) Result(client EndpointsClient) (e Endpoint, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsStartFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.EndpointsStartFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if e.Response.Response, err = future.GetResult(sender); err == nil && e.Response.Response.StatusCode != http.StatusNoContent { e, err = client.StartResponder(e.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsStartFuture", "Result", e.Response.Response, "Failure responding to request") } } return } // EndpointsStopFuture an abstraction for monitoring and retrieving the results of a long-running operation. type EndpointsStopFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. 
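
// Long-running-operation sketch (illustrative): every *Future type in this
// file follows the same flow. This assumes a configured EndpointsClient named
// client whose generated Start method returns an EndpointsStartFuture, per the
// usual autorest pattern; the names and error handling are assumptions.
//
//	future, err := client.Start(ctx, resourceGroupName, profileName, endpointName)
//	if err != nil {
//		return err
//	}
//	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
//		return err
//	}
//	endpoint, err := future.Result(client) // final Endpoint once polling finishes
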
func (future *EndpointsStopFuture) Result(client EndpointsClient) (e Endpoint, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsStopFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.EndpointsStopFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if e.Response.Response, err = future.GetResult(sender); err == nil && e.Response.Response.StatusCode != http.StatusNoContent { e, err = client.StopResponder(e.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsStopFuture", "Result", e.Response.Response, "Failure responding to request") } } return } // EndpointsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type EndpointsUpdateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. func (future *EndpointsUpdateFuture) Result(client EndpointsClient) (e Endpoint, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.EndpointsUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if e.Response.Response, err = future.GetResult(sender); err == nil && e.Response.Response.StatusCode != http.StatusNoContent { e, err = client.UpdateResponder(e.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.EndpointsUpdateFuture", "Result", e.Response.Response, "Failure responding to request") } } return } // EndpointUpdateParameters properties required to create or update an endpoint. type EndpointUpdateParameters struct { // Tags - Endpoint tags. Tags map[string]*string `json:"tags"` *EndpointPropertiesUpdateParameters `json:"properties,omitempty"` } // MarshalJSON is the custom marshaler for EndpointUpdateParameters. func (eup EndpointUpdateParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if eup.Tags != nil { objectMap["tags"] = eup.Tags } if eup.EndpointPropertiesUpdateParameters != nil { objectMap["properties"] = eup.EndpointPropertiesUpdateParameters } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for EndpointUpdateParameters struct. func (eup *EndpointUpdateParameters) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "tags": if v != nil { var tags map[string]*string err = json.Unmarshal(*v, &tags) if err != nil { return err } eup.Tags = tags } case "properties": if v != nil { var endpointPropertiesUpdateParameters EndpointPropertiesUpdateParameters err = json.Unmarshal(*v, &endpointPropertiesUpdateParameters) if err != nil { return err } eup.EndpointPropertiesUpdateParameters = &endpointPropertiesUpdateParameters } } } return nil } // ErrorResponse error response indicates CDN service is not able to process the incoming request. The reason // is provided in the error message. 
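
// Update sketch (illustrative): EndpointUpdateParameters is the payload for
// endpoint updates; unset pointer fields are omitted from the JSON, so only
// the populated values below would change on the service side.
//
//	params := EndpointUpdateParameters{
//		Tags: map[string]*string{"env": to.StringPtr("prod")},
//		EndpointPropertiesUpdateParameters: &EndpointPropertiesUpdateParameters{
//			IsCompressionEnabled:   to.BoolPtr(true),
//			ContentTypesToCompress: &[]string{"text/html", "application/json"},
//		},
//	}
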
type ErrorResponse struct {
	// Code - READ-ONLY; Error code.
	Code *string `json:"code,omitempty"`
	// Message - READ-ONLY; Error message indicating why the operation failed.
	Message *string `json:"message,omitempty"`
}

// GeoFilter rules defining the user's geo access within a CDN endpoint.
type GeoFilter struct {
	// RelativePath - Relative path applicable to the geo filter (e.g. '/mypictures', '/mypicture/kitty.jpg', etc.)
	RelativePath *string `json:"relativePath,omitempty"`
	// Action - Action of the geo filter, i.e. allow or block access. Possible values include: 'Block', 'Allow'
	Action GeoFilterActions `json:"action,omitempty"`
	// CountryCodes - Two-letter country codes defining user country access in a geo filter, e.g. AU, MX, US.
	CountryCodes *[]string `json:"countryCodes,omitempty"`
}

// HeaderActionParameters defines the parameters for the request header action.
type HeaderActionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// HeaderAction - Action to perform. Possible values include: 'Append', 'Overwrite', 'Delete'
	HeaderAction HeaderAction `json:"headerAction,omitempty"`
	// HeaderName - Name of the header to modify.
	HeaderName *string `json:"headerName,omitempty"`
	// Value - Value for the specified action.
	Value *string `json:"value,omitempty"`
}

// HTTPVersionMatchConditionParameters defines the parameters for HttpVersion match conditions.
type HTTPVersionMatchConditionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// Operator - Describes the operator to be matched.
	Operator *string `json:"operator,omitempty"`
	// NegateCondition - Describes whether this is a negate condition or not.
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - The match value for the condition of the delivery rule.
	MatchValues *[]string `json:"matchValues,omitempty"`
}

// IPAddressGroup CDN IP address group.
type IPAddressGroup struct {
	// DeliveryRegion - The delivery region of the IP address group.
	DeliveryRegion *string `json:"deliveryRegion,omitempty"`
	// Ipv4Addresses - The list of IPv4 addresses.
	Ipv4Addresses *[]CidrIPAddress `json:"ipv4Addresses,omitempty"`
	// Ipv6Addresses - The list of IPv6 addresses.
	Ipv6Addresses *[]CidrIPAddress `json:"ipv6Addresses,omitempty"`
}

// IsDeviceMatchConditionParameters defines the parameters for IsDevice match conditions.
type IsDeviceMatchConditionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// Operator - Describes the operator to be matched.
	Operator *string `json:"operator,omitempty"`
	// NegateCondition - Describes whether this is a negate condition or not.
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - The match value for the condition of the delivery rule.
	MatchValues *[]string `json:"matchValues,omitempty"`
	// Transforms - List of transforms.
	Transforms *[]Transform `json:"transforms,omitempty"`
}
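
// Construction sketch (illustrative): blocking a path for specific countries
// with a GeoFilter. The 'Block' value comes from the GeoFilterActions enum
// documented on the Action field above.
//
//	filter := GeoFilter{
//		RelativePath: to.StringPtr("/pictures"),
//		Action:       Block,
//		CountryCodes: &[]string{"AU", "MX"},
//	}

// KeyVaultCertificateSourceParameters describes the parameters for using a user's KeyVault certificate for
// securing custom domain.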
type KeyVaultCertificateSourceParameters struct { OdataType *string `json:"@odata.type,omitempty"` // SubscriptionID - Subscription Id of the user's Key Vault containing the SSL certificate SubscriptionID *string `json:"subscriptionId,omitempty"` // ResourceGroupName - Resource group of the user's Key Vault containing the SSL certificate ResourceGroupName *string `json:"resourceGroupName,omitempty"` // VaultName - The name of the user's Key Vault containing the SSL certificate VaultName *string `json:"vaultName,omitempty"` // SecretName - The name of Key Vault Secret (representing the full certificate PFX) in Key Vault. SecretName *string `json:"secretName,omitempty"` // SecretVersion - The version(GUID) of Key Vault Secret in Key Vault. SecretVersion *string `json:"secretVersion,omitempty"` // UpdateRule - Describes the action that shall be taken when the certificate is updated in Key Vault. UpdateRule *string `json:"updateRule,omitempty"` // DeleteRule - Describes the action that shall be taken when the certificate is removed from Key Vault. DeleteRule *string `json:"deleteRule,omitempty"` } // LoadParameters parameters required for content load. type LoadParameters struct { // ContentPaths - The path to the content to be loaded. Path should be a relative file URL of the origin. ContentPaths *[]string `json:"contentPaths,omitempty"` } // ManagedHTTPSParameters defines the certificate source parameters using CDN managed certificate for enabling // SSL. type ManagedHTTPSParameters struct { // CertificateSourceParameters - Defines the certificate source parameters using CDN managed certificate for enabling SSL. CertificateSourceParameters *CertificateSourceParameters `json:"certificateSourceParameters,omitempty"` // ProtocolType - Defines the TLS extension protocol that is used for secure delivery. Possible values include: 'ServerNameIndication', 'IPBased' ProtocolType ProtocolType `json:"protocolType,omitempty"` // MinimumTLSVersion - TLS protocol version that will be used for Https. Possible values include: 'None', 'TLS10', 'TLS12' MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"` // CertificateSource - Possible values include: 'CertificateSourceCustomDomainHTTPSParameters', 'CertificateSourceCdn', 'CertificateSourceAzureKeyVault' CertificateSource CertificateSource `json:"certificateSource,omitempty"` } // MarshalJSON is the custom marshaler for ManagedHTTPSParameters. func (mhp ManagedHTTPSParameters) MarshalJSON() ([]byte, error) { mhp.CertificateSource = CertificateSourceCdn objectMap := make(map[string]interface{}) if mhp.CertificateSourceParameters != nil { objectMap["certificateSourceParameters"] = mhp.CertificateSourceParameters } if mhp.ProtocolType != "" { objectMap["protocolType"] = mhp.ProtocolType } if mhp.MinimumTLSVersion != "" { objectMap["minimumTlsVersion"] = mhp.MinimumTLSVersion } if mhp.CertificateSource != "" { objectMap["certificateSource"] = mhp.CertificateSource } return json.Marshal(objectMap) } // AsManagedHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for ManagedHTTPSParameters. func (mhp ManagedHTTPSParameters) AsManagedHTTPSParameters() (*ManagedHTTPSParameters, bool) { return &mhp, true } // AsUserManagedHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for ManagedHTTPSParameters. 
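
// Construction sketch (illustrative): because MarshalJSON pins
// CertificateSource to CertificateSourceCdn, enabling CDN-managed HTTPS only
// requires the protocol and TLS settings.
//
//	params := ManagedHTTPSParameters{
//		ProtocolType:      ServerNameIndication,
//		MinimumTLSVersion: TLS12,
//	}
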
func (mhp ManagedHTTPSParameters) AsUserManagedHTTPSParameters() (*UserManagedHTTPSParameters, bool) { return nil, false } // AsCustomDomainHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for ManagedHTTPSParameters. func (mhp ManagedHTTPSParameters) AsCustomDomainHTTPSParameters() (*CustomDomainHTTPSParameters, bool) { return nil, false } // AsBasicCustomDomainHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for ManagedHTTPSParameters. func (mhp ManagedHTTPSParameters) AsBasicCustomDomainHTTPSParameters() (BasicCustomDomainHTTPSParameters, bool) { return &mhp, true } // Operation CDN REST API operation type Operation struct { // Name - READ-ONLY; Operation name: {provider}/{resource}/{operation} Name *string `json:"name,omitempty"` // Display - The object that represents the operation. Display *OperationDisplay `json:"display,omitempty"` } // MarshalJSON is the custom marshaler for Operation. func (o Operation) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if o.Display != nil { objectMap["display"] = o.Display } return json.Marshal(objectMap) } // OperationDisplay the object that represents the operation. type OperationDisplay struct { // Provider - READ-ONLY; Service provider: Microsoft.Cdn Provider *string `json:"provider,omitempty"` // Resource - READ-ONLY; Resource on which the operation is performed: Profile, endpoint, etc. Resource *string `json:"resource,omitempty"` // Operation - READ-ONLY; Operation type: Read, write, delete, etc. Operation *string `json:"operation,omitempty"` } // OperationsListResult result of the request to list CDN operations. It contains a list of operations and a // URL link to get the next set of results. type OperationsListResult struct { autorest.Response `json:"-"` // Value - READ-ONLY; List of CDN operations supported by the CDN resource provider. Value *[]Operation `json:"value,omitempty"` // NextLink - URL to get the next set of operation list results if there are any. NextLink *string `json:"nextLink,omitempty"` } // MarshalJSON is the custom marshaler for OperationsListResult. func (olr OperationsListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if olr.NextLink != nil { objectMap["nextLink"] = olr.NextLink } return json.Marshal(objectMap) } // OperationsListResultIterator provides access to a complete listing of Operation values. type OperationsListResultIterator struct { i int page OperationsListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. func (iter *OperationsListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/OperationsListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { sc = iter.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } iter.i++ if iter.i < len(iter.page.Values()) { return nil } err = iter.page.NextWithContext(ctx) if err != nil { iter.i-- return err } iter.i = 0 return nil } // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (iter *OperationsListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. 
func (iter OperationsListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. func (iter OperationsListResultIterator) Response() OperationsListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. func (iter OperationsListResultIterator) Value() Operation { if !iter.page.NotDone() { return Operation{} } return iter.page.Values()[iter.i] } // Creates a new instance of the OperationsListResultIterator type. func NewOperationsListResultIterator(page OperationsListResultPage) OperationsListResultIterator { return OperationsListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. func (olr OperationsListResult) IsEmpty() bool { return olr.Value == nil || len(*olr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. func (olr OperationsListResult) hasNextLink() bool { return olr.NextLink != nil && len(*olr.NextLink) != 0 } // operationsListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (olr OperationsListResult) operationsListResultPreparer(ctx context.Context) (*http.Request, error) { if !olr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), autorest.WithBaseURL(to.String(olr.NextLink))) } // OperationsListResultPage contains a page of Operation values. type OperationsListResultPage struct { fn func(context.Context, OperationsListResult) (OperationsListResult, error) olr OperationsListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. func (page *OperationsListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/OperationsListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { sc = page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } for { next, err := page.fn(ctx, page.olr) if err != nil { return err } page.olr = next if !next.hasNextLink() || !next.IsEmpty() { break } } return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (page *OperationsListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. func (page OperationsListResultPage) NotDone() bool { return !page.olr.IsEmpty() } // Response returns the raw server response from the last page request. func (page OperationsListResultPage) Response() OperationsListResult { return page.olr } // Values returns the slice of values for the current page or nil if there are no values. func (page OperationsListResultPage) Values() []Operation { if page.olr.IsEmpty() { return nil } return *page.olr.Value } // Creates a new instance of the OperationsListResultPage type. 
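
// Paging sketch (illustrative): the page type can be consumed directly when
// the caller wants page boundaries rather than a flat stream. The page value
// is typically returned by the generated OperationsClient.List helper; that
// name, like ctx and the error handling, is an assumption here.
//
//	for page.NotDone() {
//		for _, op := range page.Values() {
//			fmt.Println(to.String(op.Name))
//		}
//		if err := page.NextWithContext(ctx); err != nil {
//			return err
//		}
//	}
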
func NewOperationsListResultPage(getNextPage func(context.Context, OperationsListResult) (OperationsListResult, error)) OperationsListResultPage { return OperationsListResultPage{fn: getNextPage} } // Origin CDN origin is the source of the content being delivered via CDN. When the edge nodes represented by // an endpoint do not have the requested content cached, they attempt to fetch it from one or more of the // configured origins. type Origin struct { autorest.Response `json:"-"` *OriginProperties `json:"properties,omitempty"` // Location - Resource location. Location *string `json:"location,omitempty"` // Tags - Resource tags. Tags map[string]*string `json:"tags"` // ID - READ-ONLY; Resource ID. ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name. Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type. Type *string `json:"type,omitempty"` } // MarshalJSON is the custom marshaler for Origin. func (o Origin) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if o.OriginProperties != nil { objectMap["properties"] = o.OriginProperties } if o.Location != nil { objectMap["location"] = o.Location } if o.Tags != nil { objectMap["tags"] = o.Tags } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for Origin struct. func (o *Origin) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "properties": if v != nil { var originProperties OriginProperties err = json.Unmarshal(*v, &originProperties) if err != nil { return err } o.OriginProperties = &originProperties } case "location": if v != nil { var location string err = json.Unmarshal(*v, &location) if err != nil { return err } o.Location = &location } case "tags": if v != nil { var tags map[string]*string err = json.Unmarshal(*v, &tags) if err != nil { return err } o.Tags = tags } case "id": if v != nil { var ID string err = json.Unmarshal(*v, &ID) if err != nil { return err } o.ID = &ID } case "name": if v != nil { var name string err = json.Unmarshal(*v, &name) if err != nil { return err } o.Name = &name } case "type": if v != nil { var typeVar string err = json.Unmarshal(*v, &typeVar) if err != nil { return err } o.Type = &typeVar } } } return nil } // OriginListResult result of the request to list origins. It contains a list of origin objects and a URL link // to get the next set of results. type OriginListResult struct { autorest.Response `json:"-"` // Value - READ-ONLY; List of CDN origins within an endpoint Value *[]Origin `json:"value,omitempty"` // NextLink - URL to get the next set of origin objects if there are any. NextLink *string `json:"nextLink,omitempty"` } // MarshalJSON is the custom marshaler for OriginListResult. func (olr OriginListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if olr.NextLink != nil { objectMap["nextLink"] = olr.NextLink } return json.Marshal(objectMap) } // OriginListResultIterator provides access to a complete listing of Origin values. type OriginListResultIterator struct { i int page OriginListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. 
func (iter *OriginListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/OriginListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { sc = iter.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } iter.i++ if iter.i < len(iter.page.Values()) { return nil } err = iter.page.NextWithContext(ctx) if err != nil { iter.i-- return err } iter.i = 0 return nil } // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (iter *OriginListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. func (iter OriginListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. func (iter OriginListResultIterator) Response() OriginListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. func (iter OriginListResultIterator) Value() Origin { if !iter.page.NotDone() { return Origin{} } return iter.page.Values()[iter.i] } // Creates a new instance of the OriginListResultIterator type. func NewOriginListResultIterator(page OriginListResultPage) OriginListResultIterator { return OriginListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. func (olr OriginListResult) IsEmpty() bool { return olr.Value == nil || len(*olr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. func (olr OriginListResult) hasNextLink() bool { return olr.NextLink != nil && len(*olr.NextLink) != 0 } // originListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (olr OriginListResult) originListResultPreparer(ctx context.Context) (*http.Request, error) { if !olr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), autorest.WithBaseURL(to.String(olr.NextLink))) } // OriginListResultPage contains a page of Origin values. type OriginListResultPage struct { fn func(context.Context, OriginListResult) (OriginListResult, error) olr OriginListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. func (page *OriginListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/OriginListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { sc = page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } for { next, err := page.fn(ctx, page.olr) if err != nil { return err } page.olr = next if !next.hasNextLink() || !next.IsEmpty() { break } } return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. 
func (page *OriginListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. func (page OriginListResultPage) NotDone() bool { return !page.olr.IsEmpty() } // Response returns the raw server response from the last page request. func (page OriginListResultPage) Response() OriginListResult { return page.olr } // Values returns the slice of values for the current page or nil if there are no values. func (page OriginListResultPage) Values() []Origin { if page.olr.IsEmpty() { return nil } return *page.olr.Value } // Creates a new instance of the OriginListResultPage type. func NewOriginListResultPage(getNextPage func(context.Context, OriginListResult) (OriginListResult, error)) OriginListResultPage { return OriginListResultPage{fn: getNextPage} } // OriginProperties the JSON object that contains the properties of the origin. type OriginProperties struct { // HostName - The address of the origin. Domain names, IPv4 addresses, and IPv6 addresses are supported. HostName *string `json:"hostName,omitempty"` // HTTPPort - The value of the HTTP port. Must be between 1 and 65535. HTTPPort *int32 `json:"httpPort,omitempty"` // HTTPSPort - The value of the https port. Must be between 1 and 65535. HTTPSPort *int32 `json:"httpsPort,omitempty"` // ResourceState - READ-ONLY; Resource status of the origin. Possible values include: 'OriginResourceStateCreating', 'OriginResourceStateActive', 'OriginResourceStateDeleting' ResourceState OriginResourceState `json:"resourceState,omitempty"` // ProvisioningState - READ-ONLY; Provisioning status of the origin. ProvisioningState *string `json:"provisioningState,omitempty"` } // MarshalJSON is the custom marshaler for OriginProperties. func (op OriginProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if op.HostName != nil { objectMap["hostName"] = op.HostName } if op.HTTPPort != nil { objectMap["httpPort"] = op.HTTPPort } if op.HTTPSPort != nil { objectMap["httpsPort"] = op.HTTPSPort } return json.Marshal(objectMap) } // OriginPropertiesParameters the JSON object that contains the properties of the origin. type OriginPropertiesParameters struct { // HostName - The address of the origin. Domain names, IPv4 addresses, and IPv6 addresses are supported. HostName *string `json:"hostName,omitempty"` // HTTPPort - The value of the HTTP port. Must be between 1 and 65535. HTTPPort *int32 `json:"httpPort,omitempty"` // HTTPSPort - The value of the HTTPS port. Must be between 1 and 65535. HTTPSPort *int32 `json:"httpsPort,omitempty"` } // OriginsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type OriginsUpdateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. 
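
// Construction sketch (illustrative; the host name is hypothetical): both port
// fields must fall within [1, 65535]. The struct itself performs no
// validation, so out-of-range values are rejected when the request is made
// rather than at construction time.
//
//	origin := OriginPropertiesParameters{
//		HostName:  to.StringPtr("origin.contoso.com"),
//		HTTPPort:  to.Int32Ptr(80),
//		HTTPSPort: to.Int32Ptr(443),
//	}
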
func (future *OriginsUpdateFuture) Result(client OriginsClient) (o Origin, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.OriginsUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.OriginsUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if o.Response.Response, err = future.GetResult(sender); err == nil && o.Response.Response.StatusCode != http.StatusNoContent { o, err = client.UpdateResponder(o.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.OriginsUpdateFuture", "Result", o.Response.Response, "Failure responding to request") } } return } // OriginUpdateParameters origin properties needed for origin creation or update. type OriginUpdateParameters struct { *OriginPropertiesParameters `json:"properties,omitempty"` } // MarshalJSON is the custom marshaler for OriginUpdateParameters. func (oup OriginUpdateParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if oup.OriginPropertiesParameters != nil { objectMap["properties"] = oup.OriginPropertiesParameters } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for OriginUpdateParameters struct. func (oup *OriginUpdateParameters) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "properties": if v != nil { var originPropertiesParameters OriginPropertiesParameters err = json.Unmarshal(*v, &originPropertiesParameters) if err != nil { return err } oup.OriginPropertiesParameters = &originPropertiesParameters } } } return nil } // PostArgsMatchConditionParameters defines the parameters for PostArgs match conditions type PostArgsMatchConditionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // Selector - Name of PostArg to be matched Selector *string `json:"selector,omitempty"` // Operator - Describes operator to be matched. Possible values include: 'PostArgsOperatorAny', 'PostArgsOperatorEqual', 'PostArgsOperatorContains', 'PostArgsOperatorBeginsWith', 'PostArgsOperatorEndsWith', 'PostArgsOperatorLessThan', 'PostArgsOperatorLessThanOrEqual', 'PostArgsOperatorGreaterThan', 'PostArgsOperatorGreaterThanOrEqual' Operator PostArgsOperator `json:"operator,omitempty"` // NegateCondition - Describes if this is negate condition or not NegateCondition *bool `json:"negateCondition,omitempty"` // MatchValues - The match value for the condition of the delivery rule MatchValues *[]string `json:"matchValues,omitempty"` // Transforms - List of transforms Transforms *[]Transform `json:"transforms,omitempty"` } // Profile CDN profile is a logical grouping of endpoints that share the same settings, such as CDN provider // and pricing tier. type Profile struct { autorest.Response `json:"-"` // Sku - The pricing tier (defines a CDN provider, feature list and rate) of the CDN profile. Sku *Sku `json:"sku,omitempty"` *ProfileProperties `json:"properties,omitempty"` // Location - Resource location. Location *string `json:"location,omitempty"` // Tags - Resource tags. Tags map[string]*string `json:"tags"` // ID - READ-ONLY; Resource ID. ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name. 
Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type. Type *string `json:"type,omitempty"` } // MarshalJSON is the custom marshaler for Profile. func (p Profile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if p.Sku != nil { objectMap["sku"] = p.Sku } if p.ProfileProperties != nil { objectMap["properties"] = p.ProfileProperties } if p.Location != nil { objectMap["location"] = p.Location } if p.Tags != nil { objectMap["tags"] = p.Tags } return json.Marshal(objectMap) } // UnmarshalJSON is the custom unmarshaler for Profile struct. func (p *Profile) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { return err } for k, v := range m { switch k { case "sku": if v != nil { var sku Sku err = json.Unmarshal(*v, &sku) if err != nil { return err } p.Sku = &sku } case "properties": if v != nil { var profileProperties ProfileProperties err = json.Unmarshal(*v, &profileProperties) if err != nil { return err } p.ProfileProperties = &profileProperties } case "location": if v != nil { var location string err = json.Unmarshal(*v, &location) if err != nil { return err } p.Location = &location } case "tags": if v != nil { var tags map[string]*string err = json.Unmarshal(*v, &tags) if err != nil { return err } p.Tags = tags } case "id": if v != nil { var ID string err = json.Unmarshal(*v, &ID) if err != nil { return err } p.ID = &ID } case "name": if v != nil { var name string err = json.Unmarshal(*v, &name) if err != nil { return err } p.Name = &name } case "type": if v != nil { var typeVar string err = json.Unmarshal(*v, &typeVar) if err != nil { return err } p.Type = &typeVar } } } return nil } // ProfileListResult result of the request to list profiles. It contains a list of profile objects and a URL // link to get the next set of results. type ProfileListResult struct { autorest.Response `json:"-"` // Value - READ-ONLY; List of CDN profiles within a resource group. Value *[]Profile `json:"value,omitempty"` // NextLink - URL to get the next set of profile objects if there are any. NextLink *string `json:"nextLink,omitempty"` } // MarshalJSON is the custom marshaler for ProfileListResult. func (plr ProfileListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if plr.NextLink != nil { objectMap["nextLink"] = plr.NextLink } return json.Marshal(objectMap) } // ProfileListResultIterator provides access to a complete listing of Profile values. type ProfileListResultIterator struct { i int page ProfileListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. func (iter *ProfileListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ProfileListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { sc = iter.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } iter.i++ if iter.i < len(iter.page.Values()) { return nil } err = iter.page.NextWithContext(ctx) if err != nil { iter.i-- return err } iter.i = 0 return nil } // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. 
func (iter *ProfileListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. func (iter ProfileListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. func (iter ProfileListResultIterator) Response() ProfileListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. func (iter ProfileListResultIterator) Value() Profile { if !iter.page.NotDone() { return Profile{} } return iter.page.Values()[iter.i] } // Creates a new instance of the ProfileListResultIterator type. func NewProfileListResultIterator(page ProfileListResultPage) ProfileListResultIterator { return ProfileListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. func (plr ProfileListResult) IsEmpty() bool { return plr.Value == nil || len(*plr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. func (plr ProfileListResult) hasNextLink() bool { return plr.NextLink != nil && len(*plr.NextLink) != 0 } // profileListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. func (plr ProfileListResult) profileListResultPreparer(ctx context.Context) (*http.Request, error) { if !plr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), autorest.WithBaseURL(to.String(plr.NextLink))) } // ProfileListResultPage contains a page of Profile values. type ProfileListResultPage struct { fn func(context.Context, ProfileListResult) (ProfileListResult, error) plr ProfileListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. func (page *ProfileListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ProfileListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { sc = page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } for { next, err := page.fn(ctx, page.plr) if err != nil { return err } page.plr = next if !next.hasNextLink() || !next.IsEmpty() { break } } return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (page *ProfileListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. func (page ProfileListResultPage) NotDone() bool { return !page.plr.IsEmpty() } // Response returns the raw server response from the last page request. func (page ProfileListResultPage) Response() ProfileListResult { return page.plr } // Values returns the slice of values for the current page or nil if there are no values. func (page ProfileListResultPage) Values() []Profile { if page.plr.IsEmpty() { return nil } return *page.plr.Value } // Creates a new instance of the ProfileListResultPage type. 
func NewProfileListResultPage(getNextPage func(context.Context, ProfileListResult) (ProfileListResult, error)) ProfileListResultPage { return ProfileListResultPage{fn: getNextPage} } // ProfileProperties the JSON object that contains the properties required to create a profile. type ProfileProperties struct { // ResourceState - READ-ONLY; Resource status of the profile. Possible values include: 'ProfileResourceStateCreating', 'ProfileResourceStateActive', 'ProfileResourceStateDeleting', 'ProfileResourceStateDisabled' ResourceState ProfileResourceState `json:"resourceState,omitempty"` // ProvisioningState - READ-ONLY; Provisioning status of the profile. ProvisioningState *string `json:"provisioningState,omitempty"` } // ProfilesCreateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type ProfilesCreateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. func (future *ProfilesCreateFuture) Result(client ProfilesClient) (p Profile, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.ProfilesCreateFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.ProfilesCreateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if p.Response.Response, err = future.GetResult(sender); err == nil && p.Response.Response.StatusCode != http.StatusNoContent { p, err = client.CreateResponder(p.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.ProfilesCreateFuture", "Result", p.Response.Response, "Failure responding to request") } } return } // ProfilesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. type ProfilesDeleteFuture struct { azure.Future } // Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *ProfilesDeleteFuture) Result(client ProfilesClient) (ar autorest.Response, err error) {
var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.ProfilesDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.ProfilesDeleteFuture") return } ar.Response = future.Response() return } // ProfilesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. type ProfilesUpdateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. func (future *ProfilesUpdateFuture) Result(client ProfilesClient) (p Profile, err error) { var done bool done, err = future.DoneWithContext(context.Background(), client) if err != nil { err = autorest.NewErrorWithError(err, "cdn.ProfilesUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { err = azure.NewAsyncOpIncompleteError("cdn.ProfilesUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) if p.Response.Response, err = future.GetResult(sender); err == nil && p.Response.Response.StatusCode != http.StatusNoContent { p, err = client.UpdateResponder(p.Response.Response) if err != nil { err = autorest.NewErrorWithError(err, "cdn.ProfilesUpdateFuture", "Result", p.Response.Response, "Failure responding to request") } } return } // ProfileUpdateParameters properties required to update a profile. type ProfileUpdateParameters struct { // Tags - Profile tags Tags map[string]*string `json:"tags"` } // MarshalJSON is the custom marshaler for ProfileUpdateParameters. func (pup ProfileUpdateParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if pup.Tags != nil { objectMap["tags"] = pup.Tags } return json.Marshal(objectMap) } // ProxyResource the resource model definition for a ARM proxy resource. It will have everything other than // required location and tags type ProxyResource struct { // ID - READ-ONLY; Resource ID. ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name. Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type. Type *string `json:"type,omitempty"` } // PurgeParameters parameters required for content purge. type PurgeParameters struct { // ContentPaths - The path to the content to be purged. Can describe a file path or a wild card directory. ContentPaths *[]string `json:"contentPaths,omitempty"` } // QueryStringMatchConditionParameters defines the parameters for QueryString match conditions type QueryStringMatchConditionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // Operator - Describes operator to be matched. 
// Possible values include: 'QueryStringOperatorAny', 'QueryStringOperatorEqual', 'QueryStringOperatorContains', 'QueryStringOperatorBeginsWith', 'QueryStringOperatorEndsWith', 'QueryStringOperatorLessThan', 'QueryStringOperatorLessThanOrEqual', 'QueryStringOperatorGreaterThan', 'QueryStringOperatorGreaterThanOrEqual'
	Operator QueryStringOperator `json:"operator,omitempty"`
	// NegateCondition - Describes if this is negate condition or not
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - The match value for the condition of the delivery rule
	MatchValues *[]string `json:"matchValues,omitempty"`
	// Transforms - List of transforms
	Transforms *[]Transform `json:"transforms,omitempty"`
}

// RemoteAddressMatchConditionParameters defines the parameters for RemoteAddress match conditions
type RemoteAddressMatchConditionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// Operator - Describes operator to be matched. Possible values include: 'RemoteAddressOperatorAny', 'RemoteAddressOperatorIPMatch', 'RemoteAddressOperatorGeoMatch'
	Operator RemoteAddressOperator `json:"operator,omitempty"`
	// NegateCondition - Describes if this is negate condition or not
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - Match values to match against. The operator will apply to each value in here with OR semantics. If any of them match the variable with the given operator this match condition is considered a match.
	MatchValues *[]string `json:"matchValues,omitempty"`
	// Transforms - List of transforms
	Transforms *[]Transform `json:"transforms,omitempty"`
}

// RequestBodyMatchConditionParameters defines the parameters for RequestBody match conditions
type RequestBodyMatchConditionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// Operator - Describes operator to be matched. Possible values include: 'RequestBodyOperatorAny', 'RequestBodyOperatorEqual', 'RequestBodyOperatorContains', 'RequestBodyOperatorBeginsWith', 'RequestBodyOperatorEndsWith', 'RequestBodyOperatorLessThan', 'RequestBodyOperatorLessThanOrEqual', 'RequestBodyOperatorGreaterThan', 'RequestBodyOperatorGreaterThanOrEqual'
	Operator RequestBodyOperator `json:"operator,omitempty"`
	// NegateCondition - Describes if this is negate condition or not
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - The match value for the condition of the delivery rule
	MatchValues *[]string `json:"matchValues,omitempty"`
	// Transforms - List of transforms
	Transforms *[]Transform `json:"transforms,omitempty"`
}

// RequestHeaderMatchConditionParameters defines the parameters for RequestHeader match conditions
type RequestHeaderMatchConditionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// Selector - Name of Header to be matched
	Selector *string `json:"selector,omitempty"`
	// Operator - Describes operator to be matched.
// Possible values include: 'RequestHeaderOperatorAny', 'RequestHeaderOperatorEqual', 'RequestHeaderOperatorContains', 'RequestHeaderOperatorBeginsWith', 'RequestHeaderOperatorEndsWith', 'RequestHeaderOperatorLessThan', 'RequestHeaderOperatorLessThanOrEqual', 'RequestHeaderOperatorGreaterThan', 'RequestHeaderOperatorGreaterThanOrEqual'
	Operator RequestHeaderOperator `json:"operator,omitempty"`
	// NegateCondition - Describes if this is negate condition or not
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - The match value for the condition of the delivery rule
	MatchValues *[]string `json:"matchValues,omitempty"`
	// Transforms - List of transforms
	Transforms *[]Transform `json:"transforms,omitempty"`
}

// RequestMethodMatchConditionParameters defines the parameters for RequestMethod match conditions
type RequestMethodMatchConditionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// Operator - Describes operator to be matched
	Operator *string `json:"operator,omitempty"`
	// NegateCondition - Describes if this is negate condition or not
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - The match value for the condition of the delivery rule
	MatchValues *[]string `json:"matchValues,omitempty"`
}

// RequestSchemeMatchConditionParameters defines the parameters for RequestScheme match conditions
type RequestSchemeMatchConditionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// Operator - Describes operator to be matched
	Operator *string `json:"operator,omitempty"`
	// NegateCondition - Describes if this is negate condition or not
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - The match value for the condition of the delivery rule
	MatchValues *[]string `json:"matchValues,omitempty"`
}

// RequestURIMatchConditionParameters defines the parameters for RequestUri match conditions
type RequestURIMatchConditionParameters struct {
	OdataType *string `json:"@odata.type,omitempty"`
	// Operator - Describes operator to be matched. Possible values include: 'RequestURIOperatorAny', 'RequestURIOperatorEqual', 'RequestURIOperatorContains', 'RequestURIOperatorBeginsWith', 'RequestURIOperatorEndsWith', 'RequestURIOperatorLessThan', 'RequestURIOperatorLessThanOrEqual', 'RequestURIOperatorGreaterThan', 'RequestURIOperatorGreaterThanOrEqual'
	Operator RequestURIOperator `json:"operator,omitempty"`
	// NegateCondition - Describes if this is negate condition or not
	NegateCondition *bool `json:"negateCondition,omitempty"`
	// MatchValues - The match value for the condition of the delivery rule
	MatchValues *[]string `json:"matchValues,omitempty"`
	// Transforms - List of transforms
	Transforms *[]Transform `json:"transforms,omitempty"`
}

// Resource the core properties of ARM resources
type Resource struct {
	// ID - READ-ONLY; Resource ID.
	ID *string `json:"id,omitempty"`
	// Name - READ-ONLY; Resource name.
	Name *string `json:"name,omitempty"`
	// Type - READ-ONLY; Resource type.
	Type *string `json:"type,omitempty"`
}

// ResourceUsage output of check resource usage API.
type ResourceUsage struct {
	// ResourceType - READ-ONLY; Resource type for which the usage is provided.
	ResourceType *string `json:"resourceType,omitempty"`
	// Unit - READ-ONLY; Unit of the usage. e.g. Count.
	Unit *string `json:"unit,omitempty"`
	// CurrentValue - READ-ONLY; Actual value of usage on the specified resource type.
	CurrentValue *int32 `json:"currentValue,omitempty"`
	// Limit - READ-ONLY; Quota of the specified resource type.
Limit *int32 `json:"limit,omitempty"` } // ResourceUsageListResult output of check resource usage API. type ResourceUsageListResult struct { autorest.Response `json:"-"` // Value - READ-ONLY; List of resource usages. Value *[]ResourceUsage `json:"value,omitempty"` // NextLink - URL to get the next set of custom domain objects if there are any. NextLink *string `json:"nextLink,omitempty"` } // MarshalJSON is the custom marshaler for ResourceUsageListResult. func (rulr ResourceUsageListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if rulr.NextLink != nil { objectMap["nextLink"] = rulr.NextLink } return json.Marshal(objectMap) } // ResourceUsageListResultIterator provides access to a complete listing of ResourceUsage values. type ResourceUsageListResultIterator struct { i int page ResourceUsageListResultPage } // NextWithContext advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. func (iter *ResourceUsageListResultIterator) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ResourceUsageListResultIterator.NextWithContext") defer func() { sc := -1 if iter.Response().Response.Response != nil { sc = iter.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } iter.i++ if iter.i < len(iter.page.Values()) { return nil } err = iter.page.NextWithContext(ctx) if err != nil { iter.i-- return err } iter.i = 0 return nil } // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (iter *ResourceUsageListResultIterator) Next() error { return iter.NextWithContext(context.Background()) } // NotDone returns true if the enumeration should be started or is not yet complete. func (iter ResourceUsageListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. func (iter ResourceUsageListResultIterator) Response() ResourceUsageListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. func (iter ResourceUsageListResultIterator) Value() ResourceUsage { if !iter.page.NotDone() { return ResourceUsage{} } return iter.page.Values()[iter.i] } // Creates a new instance of the ResourceUsageListResultIterator type. func NewResourceUsageListResultIterator(page ResourceUsageListResultPage) ResourceUsageListResultIterator { return ResourceUsageListResultIterator{page: page} } // IsEmpty returns true if the ListResult contains no values. func (rulr ResourceUsageListResult) IsEmpty() bool { return rulr.Value == nil || len(*rulr.Value) == 0 } // hasNextLink returns true if the NextLink is not empty. func (rulr ResourceUsageListResult) hasNextLink() bool { return rulr.NextLink != nil && len(*rulr.NextLink) != 0 } // resourceUsageListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. 
func (rulr ResourceUsageListResult) resourceUsageListResultPreparer(ctx context.Context) (*http.Request, error) { if !rulr.hasNextLink() { return nil, nil } return autorest.Prepare((&http.Request{}).WithContext(ctx), autorest.AsJSON(), autorest.AsGet(), autorest.WithBaseURL(to.String(rulr.NextLink))) } // ResourceUsageListResultPage contains a page of ResourceUsage values. type ResourceUsageListResultPage struct { fn func(context.Context, ResourceUsageListResult) (ResourceUsageListResult, error) rulr ResourceUsageListResult } // NextWithContext advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. func (page *ResourceUsageListResultPage) NextWithContext(ctx context.Context) (err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ResourceUsageListResultPage.NextWithContext") defer func() { sc := -1 if page.Response().Response.Response != nil { sc = page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } for { next, err := page.fn(ctx, page.rulr) if err != nil { return err } page.rulr = next if !next.hasNextLink() || !next.IsEmpty() { break } } return nil } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. // Deprecated: Use NextWithContext() instead. func (page *ResourceUsageListResultPage) Next() error { return page.NextWithContext(context.Background()) } // NotDone returns true if the page enumeration should be started or is not yet complete. func (page ResourceUsageListResultPage) NotDone() bool { return !page.rulr.IsEmpty() } // Response returns the raw server response from the last page request. func (page ResourceUsageListResultPage) Response() ResourceUsageListResult { return page.rulr } // Values returns the slice of values for the current page or nil if there are no values. func (page ResourceUsageListResultPage) Values() []ResourceUsage { if page.rulr.IsEmpty() { return nil } return *page.rulr.Value } // Creates a new instance of the ResourceUsageListResultPage type. func NewResourceUsageListResultPage(getNextPage func(context.Context, ResourceUsageListResult) (ResourceUsageListResult, error)) ResourceUsageListResultPage { return ResourceUsageListResultPage{fn: getNextPage} } // Sku the pricing tier (defines a CDN provider, feature list and rate) of the CDN profile. type Sku struct { // Name - Name of the pricing tier. Possible values include: 'StandardVerizon', 'PremiumVerizon', 'CustomVerizon', 'StandardAkamai', 'StandardChinaCdn', 'StandardMicrosoft', 'PremiumChinaCdn' Name SkuName `json:"name,omitempty"` } // SsoURI the URI required to login to the supplemental portal from the Azure portal. type SsoURI struct { autorest.Response `json:"-"` // SsoURIValue - READ-ONLY; The URI used to login to the supplemental portal. SsoURIValue *string `json:"ssoUriValue,omitempty"` } // SupportedOptimizationTypesListResult the result of the GetSupportedOptimizationTypes API type SupportedOptimizationTypesListResult struct { autorest.Response `json:"-"` // SupportedOptimizationTypes - READ-ONLY; Supported optimization types for a profile. SupportedOptimizationTypes *[]OptimizationType `json:"supportedOptimizationTypes,omitempty"` } // TrackedResource the resource model definition for a ARM tracked top level resource. type TrackedResource struct { // Location - Resource location. Location *string `json:"location,omitempty"` // Tags - Resource tags. 
Tags map[string]*string `json:"tags"` // ID - READ-ONLY; Resource ID. ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name. Name *string `json:"name,omitempty"` // Type - READ-ONLY; Resource type. Type *string `json:"type,omitempty"` } // MarshalJSON is the custom marshaler for TrackedResource. func (tr TrackedResource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) if tr.Location != nil { objectMap["location"] = tr.Location } if tr.Tags != nil { objectMap["tags"] = tr.Tags } return json.Marshal(objectMap) } // URLFileExtensionMatchConditionParameters defines the parameters for UrlFileExtension match conditions type URLFileExtensionMatchConditionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // Operator - Describes operator to be matched. Possible values include: 'URLFileExtensionOperatorAny', 'URLFileExtensionOperatorEqual', 'URLFileExtensionOperatorContains', 'URLFileExtensionOperatorBeginsWith', 'URLFileExtensionOperatorEndsWith', 'URLFileExtensionOperatorLessThan', 'URLFileExtensionOperatorLessThanOrEqual', 'URLFileExtensionOperatorGreaterThan', 'URLFileExtensionOperatorGreaterThanOrEqual' Operator URLFileExtensionOperator `json:"operator,omitempty"` // NegateCondition - Describes if this is negate condition or not NegateCondition *bool `json:"negateCondition,omitempty"` // MatchValues - The match value for the condition of the delivery rule MatchValues *[]string `json:"matchValues,omitempty"` // Transforms - List of transforms Transforms *[]Transform `json:"transforms,omitempty"` } // URLFileNameMatchConditionParameters defines the parameters for UrlFilename match conditions type URLFileNameMatchConditionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // Operator - Describes operator to be matched. Possible values include: 'URLFileNameOperatorAny', 'URLFileNameOperatorEqual', 'URLFileNameOperatorContains', 'URLFileNameOperatorBeginsWith', 'URLFileNameOperatorEndsWith', 'URLFileNameOperatorLessThan', 'URLFileNameOperatorLessThanOrEqual', 'URLFileNameOperatorGreaterThan', 'URLFileNameOperatorGreaterThanOrEqual' Operator URLFileNameOperator `json:"operator,omitempty"` // NegateCondition - Describes if this is negate condition or not NegateCondition *bool `json:"negateCondition,omitempty"` // MatchValues - The match value for the condition of the delivery rule MatchValues *[]string `json:"matchValues,omitempty"` // Transforms - List of transforms Transforms *[]Transform `json:"transforms,omitempty"` } // URLPathMatchConditionParameters defines the parameters for UrlPath match conditions type URLPathMatchConditionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // Operator - Describes operator to be matched. Possible values include: 'URLPathOperatorAny', 'URLPathOperatorEqual', 'URLPathOperatorContains', 'URLPathOperatorBeginsWith', 'URLPathOperatorEndsWith', 'URLPathOperatorLessThan', 'URLPathOperatorLessThanOrEqual', 'URLPathOperatorGreaterThan', 'URLPathOperatorGreaterThanOrEqual', 'URLPathOperatorWildcard' Operator URLPathOperator `json:"operator,omitempty"` // NegateCondition - Describes if this is negate condition or not NegateCondition *bool `json:"negateCondition,omitempty"` // MatchValues - The match value for the condition of the delivery rule MatchValues *[]string `json:"matchValues,omitempty"` // Transforms - List of transforms Transforms *[]Transform `json:"transforms,omitempty"` } // URLRedirectAction defines the url redirect action for the delivery rule. 
type URLRedirectAction struct { // Parameters - Defines the parameters for the action. Parameters *URLRedirectActionParameters `json:"parameters,omitempty"` // Name - Possible values include: 'NameDeliveryRuleAction', 'NameURLRedirect', 'NameURLRewrite', 'NameModifyRequestHeader', 'NameModifyResponseHeader', 'NameCacheExpiration', 'NameCacheKeyQueryString' Name NameBasicDeliveryRuleAction `json:"name,omitempty"` } // MarshalJSON is the custom marshaler for URLRedirectAction. func (ura URLRedirectAction) MarshalJSON() ([]byte, error) { ura.Name = NameURLRedirect objectMap := make(map[string]interface{}) if ura.Parameters != nil { objectMap["parameters"] = ura.Parameters } if ura.Name != "" { objectMap["name"] = ura.Name } return json.Marshal(objectMap) } // AsURLRedirectAction is the BasicDeliveryRuleAction implementation for URLRedirectAction. func (ura URLRedirectAction) AsURLRedirectAction() (*URLRedirectAction, bool) { return &ura, true } // AsURLRewriteAction is the BasicDeliveryRuleAction implementation for URLRedirectAction. func (ura URLRedirectAction) AsURLRewriteAction() (*URLRewriteAction, bool) { return nil, false } // AsDeliveryRuleRequestHeaderAction is the BasicDeliveryRuleAction implementation for URLRedirectAction. func (ura URLRedirectAction) AsDeliveryRuleRequestHeaderAction() (*DeliveryRuleRequestHeaderAction, bool) { return nil, false } // AsDeliveryRuleResponseHeaderAction is the BasicDeliveryRuleAction implementation for URLRedirectAction. func (ura URLRedirectAction) AsDeliveryRuleResponseHeaderAction() (*DeliveryRuleResponseHeaderAction, bool) { return nil, false } // AsDeliveryRuleCacheExpirationAction is the BasicDeliveryRuleAction implementation for URLRedirectAction. func (ura URLRedirectAction) AsDeliveryRuleCacheExpirationAction() (*DeliveryRuleCacheExpirationAction, bool) { return nil, false } // AsDeliveryRuleCacheKeyQueryStringAction is the BasicDeliveryRuleAction implementation for URLRedirectAction. func (ura URLRedirectAction) AsDeliveryRuleCacheKeyQueryStringAction() (*DeliveryRuleCacheKeyQueryStringAction, bool) { return nil, false } // AsDeliveryRuleAction is the BasicDeliveryRuleAction implementation for URLRedirectAction. func (ura URLRedirectAction) AsDeliveryRuleAction() (*DeliveryRuleAction, bool) { return nil, false } // AsBasicDeliveryRuleAction is the BasicDeliveryRuleAction implementation for URLRedirectAction. func (ura URLRedirectAction) AsBasicDeliveryRuleAction() (BasicDeliveryRuleAction, bool) { return &ura, true } // URLRedirectActionParameters defines the parameters for the url redirect action. type URLRedirectActionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // RedirectType - The redirect type the rule will use when redirecting traffic. Possible values include: 'Moved', 'Found', 'TemporaryRedirect', 'PermanentRedirect' RedirectType RedirectType `json:"redirectType,omitempty"` // DestinationProtocol - Protocol to use for the redirect. The default value is MatchRequest. Possible values include: 'MatchRequest', 'HTTP', 'HTTPS' DestinationProtocol DestinationProtocol `json:"destinationProtocol,omitempty"` // CustomPath - The full path to redirect. Path cannot be empty and must start with /. Leave empty to use the incoming path as destination path. CustomPath *string `json:"customPath,omitempty"` // CustomHostname - Host to redirect. Leave empty to use the incoming host as the destination host. 
CustomHostname *string `json:"customHostname,omitempty"` // CustomQueryString - The set of query strings to be placed in the redirect URL. Setting this value would replace any existing query string; leave empty to preserve the incoming query string. Query string must be in <key>=<value> format. ? and & will be added automatically so do not include them. CustomQueryString *string `json:"customQueryString,omitempty"` // CustomFragment - Fragment to add to the redirect URL. Fragment is the part of the URL that comes after #. Do not include the #. CustomFragment *string `json:"customFragment,omitempty"` } // URLRewriteAction defines the url rewrite action for the delivery rule. type URLRewriteAction struct { // Parameters - Defines the parameters for the action. Parameters *URLRewriteActionParameters `json:"parameters,omitempty"` // Name - Possible values include: 'NameDeliveryRuleAction', 'NameURLRedirect', 'NameURLRewrite', 'NameModifyRequestHeader', 'NameModifyResponseHeader', 'NameCacheExpiration', 'NameCacheKeyQueryString' Name NameBasicDeliveryRuleAction `json:"name,omitempty"` } // MarshalJSON is the custom marshaler for URLRewriteAction. func (ura URLRewriteAction) MarshalJSON() ([]byte, error) { ura.Name = NameURLRewrite objectMap := make(map[string]interface{}) if ura.Parameters != nil { objectMap["parameters"] = ura.Parameters } if ura.Name != "" { objectMap["name"] = ura.Name } return json.Marshal(objectMap) } // AsURLRedirectAction is the BasicDeliveryRuleAction implementation for URLRewriteAction. func (ura URLRewriteAction) AsURLRedirectAction() (*URLRedirectAction, bool) { return nil, false } // AsURLRewriteAction is the BasicDeliveryRuleAction implementation for URLRewriteAction. func (ura URLRewriteAction) AsURLRewriteAction() (*URLRewriteAction, bool) { return &ura, true } // AsDeliveryRuleRequestHeaderAction is the BasicDeliveryRuleAction implementation for URLRewriteAction. func (ura URLRewriteAction) AsDeliveryRuleRequestHeaderAction() (*DeliveryRuleRequestHeaderAction, bool) { return nil, false } // AsDeliveryRuleResponseHeaderAction is the BasicDeliveryRuleAction implementation for URLRewriteAction. func (ura URLRewriteAction) AsDeliveryRuleResponseHeaderAction() (*DeliveryRuleResponseHeaderAction, bool) { return nil, false } // AsDeliveryRuleCacheExpirationAction is the BasicDeliveryRuleAction implementation for URLRewriteAction. func (ura URLRewriteAction) AsDeliveryRuleCacheExpirationAction() (*DeliveryRuleCacheExpirationAction, bool) { return nil, false } // AsDeliveryRuleCacheKeyQueryStringAction is the BasicDeliveryRuleAction implementation for URLRewriteAction. func (ura URLRewriteAction) AsDeliveryRuleCacheKeyQueryStringAction() (*DeliveryRuleCacheKeyQueryStringAction, bool) { return nil, false } // AsDeliveryRuleAction is the BasicDeliveryRuleAction implementation for URLRewriteAction. func (ura URLRewriteAction) AsDeliveryRuleAction() (*DeliveryRuleAction, bool) { return nil, false } // AsBasicDeliveryRuleAction is the BasicDeliveryRuleAction implementation for URLRewriteAction. func (ura URLRewriteAction) AsBasicDeliveryRuleAction() (BasicDeliveryRuleAction, bool) { return &ura, true } // URLRewriteActionParameters defines the parameters for the url rewrite action. type URLRewriteActionParameters struct { OdataType *string `json:"@odata.type,omitempty"` // SourcePattern - define a request URI pattern that identifies the type of requests that may be rewritten. Currently, source pattern uses a prefix-based match. 
// To match all URL paths, use "/" as the source pattern value. To match only the root directory and re-write this path, use the origin path field
	SourcePattern *string `json:"sourcePattern,omitempty"`
	// Destination - Define the destination path to be used in the rewrite. This will overwrite the source pattern
	Destination *string `json:"destination,omitempty"`
	// PreserveUnmatchedPath - If True, the remaining path after the source pattern will be appended to the new destination path.
	PreserveUnmatchedPath *bool `json:"preserveUnmatchedPath,omitempty"`
}

// UserManagedHTTPSParameters defines the certificate source parameters using user's keyvault certificate for
// enabling SSL.
type UserManagedHTTPSParameters struct {
	// CertificateSourceParameters - Defines the certificate source parameters using user's keyvault certificate for enabling SSL.
	CertificateSourceParameters *KeyVaultCertificateSourceParameters `json:"certificateSourceParameters,omitempty"`
	// ProtocolType - Defines the TLS extension protocol that is used for secure delivery. Possible values include: 'ServerNameIndication', 'IPBased'
	ProtocolType ProtocolType `json:"protocolType,omitempty"`
	// MinimumTLSVersion - TLS protocol version that will be used for Https. Possible values include: 'None', 'TLS10', 'TLS12'
	MinimumTLSVersion MinimumTLSVersion `json:"minimumTlsVersion,omitempty"`
	// CertificateSource - Possible values include: 'CertificateSourceCustomDomainHTTPSParameters', 'CertificateSourceCdn', 'CertificateSourceAzureKeyVault'
	CertificateSource CertificateSource `json:"certificateSource,omitempty"`
}

// MarshalJSON is the custom marshaler for UserManagedHTTPSParameters.
func (umhp UserManagedHTTPSParameters) MarshalJSON() ([]byte, error) {
	umhp.CertificateSource = CertificateSourceAzureKeyVault
	objectMap := make(map[string]interface{})
	if umhp.CertificateSourceParameters != nil {
		objectMap["certificateSourceParameters"] = umhp.CertificateSourceParameters
	}
	if umhp.ProtocolType != "" {
		objectMap["protocolType"] = umhp.ProtocolType
	}
	if umhp.MinimumTLSVersion != "" {
		objectMap["minimumTlsVersion"] = umhp.MinimumTLSVersion
	}
	if umhp.CertificateSource != "" {
		objectMap["certificateSource"] = umhp.CertificateSource
	}
	return json.Marshal(objectMap)
}

// AsManagedHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for UserManagedHTTPSParameters.
func (umhp UserManagedHTTPSParameters) AsManagedHTTPSParameters() (*ManagedHTTPSParameters, bool) {
	return nil, false
}

// AsUserManagedHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for UserManagedHTTPSParameters.
func (umhp UserManagedHTTPSParameters) AsUserManagedHTTPSParameters() (*UserManagedHTTPSParameters, bool) {
	return &umhp, true
}

// AsCustomDomainHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for UserManagedHTTPSParameters.
func (umhp UserManagedHTTPSParameters) AsCustomDomainHTTPSParameters() (*CustomDomainHTTPSParameters, bool) {
	return nil, false
}

// AsBasicCustomDomainHTTPSParameters is the BasicCustomDomainHTTPSParameters implementation for UserManagedHTTPSParameters.
func (umhp UserManagedHTTPSParameters) AsBasicCustomDomainHTTPSParameters() (BasicCustomDomainHTTPSParameters, bool) {
	return &umhp, true
}

// ValidateCustomDomainInput input of the custom domain to be validated for DNS mapping.
type ValidateCustomDomainInput struct {
	// HostName - The host name of the custom domain. Must be a domain name.
HostName *string `json:"hostName,omitempty"` } // ValidateCustomDomainOutput output of custom domain validation. type ValidateCustomDomainOutput struct { autorest.Response `json:"-"` // CustomDomainValidated - READ-ONLY; Indicates whether the custom domain is valid or not. CustomDomainValidated *bool `json:"customDomainValidated,omitempty"` // Reason - READ-ONLY; The reason why the custom domain is not valid. Reason *string `json:"reason,omitempty"` // Message - READ-ONLY; Error message describing why the custom domain is not valid. Message *string `json:"message,omitempty"` } // ValidateProbeInput input of the validate probe API. type ValidateProbeInput struct { // ProbeURL - The probe URL to validate. ProbeURL *string `json:"probeURL,omitempty"` } // ValidateProbeOutput output of the validate probe API. type ValidateProbeOutput struct { autorest.Response `json:"-"` // IsValid - READ-ONLY; Indicates whether the probe URL is accepted or not. IsValid *bool `json:"isValid,omitempty"` // ErrorCode - READ-ONLY; Specifies the error code when the probe url is not accepted. ErrorCode *string `json:"errorCode,omitempty"` // Message - READ-ONLY; The detailed error message describing why the probe URL is not accepted. Message *string `json:"message,omitempty"` }
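// Illustrative usage sketch (comments only, not part of the generated file):
// every Iterator type above is consumed the same way. "ListComplete" is an
// assumed ProfilesClient method name standing in for whichever call returns
// a ProfileListResultIterator.
//
//	for it, err := client.ListComplete(ctx, resourceGroupName); it.NotDone(); err = it.NextWithContext(ctx) {
//		if err != nil {
//			return err
//		}
//		p := it.Value() // the current Profile on this page
//		_ = p
//	}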
error.rs
use std::fmt::Display;
use std::fmt::Error as FmtError;
use std::fmt::Formatter;
use std::io::Error as IOError;
use serde_json::Error as JsonError;

#[derive(Debug)]
pub enum TreasureError {
    IO(IOError),
    JSON(JsonError)
}

impl Display for TreasureError {
    fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
        match self {
            Self::IO(io) => write!(f, "An IO error occurred: {}", io),
            Self::JSON(json) => write!(f, "A JSON error occurred: {}", json)
        }
    }
}

impl std::error::Error for TreasureError {}

impl From<IOError> for TreasureError {
    fn from(io: IOError) -> Self {
        TreasureError::IO(io)
    }
}

impl From<JsonError> for TreasureError {
    fn from(json: JsonError) -> Self {
        TreasureError::JSON(json)
    }
}
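// Minimal usage sketch (not part of the original module): the `From` impls
// above let `?` convert both underlying errors into `TreasureError`. The file
// path and the use of `serde_json::Value` are illustrative assumptions.
#[allow(dead_code)]
fn read_json(path: &str) -> Result<serde_json::Value, TreasureError> {
    use std::io::Read;

    let mut contents = String::new();
    // `?` maps std::io::Error -> TreasureError::IO via From<IOError>
    std::fs::File::open(path)?.read_to_string(&mut contents)?;
    // `?` maps serde_json::Error -> TreasureError::JSON via From<JsonError>
    let value: serde_json::Value = serde_json::from_str(&contents)?;
    Ok(value)
}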
access_level.py
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['AccessLevelArgs', 'AccessLevel'] @pulumi.input_type class AccessLevelArgs: def __init__(__self__, *, access_policy_id: pulumi.Input[str], basic: Optional[pulumi.Input['BasicLevelArgs']] = None, custom: Optional[pulumi.Input['CustomLevelArgs']] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a AccessLevel resource. :param pulumi.Input['BasicLevelArgs'] basic: A `BasicLevel` composed of `Conditions`. :param pulumi.Input['CustomLevelArgs'] custom: A `CustomLevel` written in the Common Expression Language. :param pulumi.Input[str] description: Description of the `AccessLevel` and its use. Does not affect behavior. :param pulumi.Input[str] name: Resource name for the Access Level. The `short_name` component must begin with a letter and only include alphanumeric and '_'. Format: `accessPolicies/{access_policy}/accessLevels/{access_level}`. The maximum length of the `access_level` component is 50 characters. :param pulumi.Input[str] title: Human readable title. Must be unique within the Policy. """ pulumi.set(__self__, "access_policy_id", access_policy_id) if basic is not None: pulumi.set(__self__, "basic", basic) if custom is not None: pulumi.set(__self__, "custom", custom) if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if title is not None: pulumi.set(__self__, "title", title) @property @pulumi.getter(name="accessPolicyId") def access_policy_id(self) -> pulumi.Input[str]: return pulumi.get(self, "access_policy_id") @access_policy_id.setter def access_policy_id(self, value: pulumi.Input[str]): pulumi.set(self, "access_policy_id", value) @property @pulumi.getter def basic(self) -> Optional[pulumi.Input['BasicLevelArgs']]: """ A `BasicLevel` composed of `Conditions`. """ return pulumi.get(self, "basic") @basic.setter def basic(self, value: Optional[pulumi.Input['BasicLevelArgs']]): pulumi.set(self, "basic", value) @property @pulumi.getter def custom(self) -> Optional[pulumi.Input['CustomLevelArgs']]: """ A `CustomLevel` written in the Common Expression Language. """ return pulumi.get(self, "custom") @custom.setter def custom(self, value: Optional[pulumi.Input['CustomLevelArgs']]): pulumi.set(self, "custom", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Description of the `AccessLevel` and its use. Does not affect behavior. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Resource name for the Access Level. The `short_name` component must begin with a letter and only include alphanumeric and '_'. Format: `accessPolicies/{access_policy}/accessLevels/{access_level}`. The maximum length of the `access_level` component is 50 characters. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def title(self) -> Optional[pulumi.Input[str]]: """ Human readable title. Must be unique within the Policy. """ return pulumi.get(self, "title") @title.setter def title(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "title", value) class AccessLevel(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, access_policy_id: Optional[pulumi.Input[str]] = None, basic: Optional[pulumi.Input[pulumi.InputType['BasicLevelArgs']]] = None, custom: Optional[pulumi.Input[pulumi.InputType['CustomLevelArgs']]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None, __props__=None): """ Creates an access level. The long-running operation from this RPC has a successful status after the access level propagates to long-lasting storage. If access levels contain errors, an error response is returned for the first error encountered. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['BasicLevelArgs']] basic: A `BasicLevel` composed of `Conditions`. :param pulumi.Input[pulumi.InputType['CustomLevelArgs']] custom: A `CustomLevel` written in the Common Expression Language. :param pulumi.Input[str] description: Description of the `AccessLevel` and its use. Does not affect behavior. :param pulumi.Input[str] name: Resource name for the Access Level. The `short_name` component must begin with a letter and only include alphanumeric and '_'. Format: `accessPolicies/{access_policy}/accessLevels/{access_level}`. The maximum length of the `access_level` component is 50 characters. :param pulumi.Input[str] title: Human readable title. Must be unique within the Policy. """ ... @overload def __init__(__self__, resource_name: str, args: AccessLevelArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Creates an access level. The long-running operation from this RPC has a successful status after the access level propagates to long-lasting storage. If access levels contain errors, an error response is returned for the first error encountered. :param str resource_name: The name of the resource. :param AccessLevelArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(AccessLevelArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def
(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, access_policy_id: Optional[pulumi.Input[str]] = None, basic: Optional[pulumi.Input[pulumi.InputType['BasicLevelArgs']]] = None, custom: Optional[pulumi.Input[pulumi.InputType['CustomLevelArgs']]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, title: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = AccessLevelArgs.__new__(AccessLevelArgs) if access_policy_id is None and not opts.urn: raise TypeError("Missing required property 'access_policy_id'") __props__.__dict__["access_policy_id"] = access_policy_id __props__.__dict__["basic"] = basic __props__.__dict__["custom"] = custom __props__.__dict__["description"] = description __props__.__dict__["name"] = name __props__.__dict__["title"] = title super(AccessLevel, __self__).__init__( 'google-native:accesscontextmanager/v1:AccessLevel', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'AccessLevel': """ Get an existing AccessLevel resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = AccessLevelArgs.__new__(AccessLevelArgs) __props__.__dict__["basic"] = None __props__.__dict__["custom"] = None __props__.__dict__["description"] = None __props__.__dict__["name"] = None __props__.__dict__["title"] = None return AccessLevel(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def basic(self) -> pulumi.Output['outputs.BasicLevelResponse']: """ A `BasicLevel` composed of `Conditions`. """ return pulumi.get(self, "basic") @property @pulumi.getter def custom(self) -> pulumi.Output['outputs.CustomLevelResponse']: """ A `CustomLevel` written in the Common Expression Language. """ return pulumi.get(self, "custom") @property @pulumi.getter def description(self) -> pulumi.Output[str]: """ Description of the `AccessLevel` and its use. Does not affect behavior. """ return pulumi.get(self, "description") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name for the Access Level. The `short_name` component must begin with a letter and only include alphanumeric and '_'. Format: `accessPolicies/{access_policy}/accessLevels/{access_level}`. The maximum length of the `access_level` component is 50 characters. """ return pulumi.get(self, "name") @property @pulumi.getter def title(self) -> pulumi.Output[str]: """ Human readable title. Must be unique within the Policy. """ return pulumi.get(self, "title")
_internal_init
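# Illustrative usage sketch (not part of the generated SDK). The module path,
# policy id, and title below are assumptions for a typical Pulumi program:
#
#   import pulumi_google_native.accesscontextmanager.v1 as acm
#
#   level = acm.AccessLevel("example-level",
#       access_policy_id="0123456789",
#       title="Trusted corp devices")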
bloom_filter_factory.py
#!/usr/bin/env python
from bloom_filter import *
from math import log
from math import ceil


def fill_filter_from_file(filepath, max_error_prob, suppress_prints=False):
    """Returns a bloom_filter with optimal storage parameters for the
    initialization file, filled with the words from that file.
    """
    with open(filepath) as f:
        word_num = int(f.readline().strip("\r\n"))
        if not suppress_prints:
            print "Number of words: " + str(word_num)

        # optimal parameters for the bloom filter:
        # k = ln(1/p) / ln(2) hashes and m = n * ln(1/p) / ln(2)^2 bits
        optimal_hash_num = int(ceil(log(1 / max_error_prob) / log(2)))
        optimal_filter_len = int(ceil(word_num * log(1 / max_error_prob) / (log(2) ** 2)))
        if not suppress_prints:
            print "Number of hashes: %s\nFilter length: %s" \
                % (optimal_hash_num, optimal_filter_len)

        filter = bloom_filter(optimal_filter_len, optimal_hash_num)

        # fill the filter with the words from the init file
        for line in f:
            filter.add(line.strip("\r\n"))

    return filter
tools.go
// +build tools

package tools

import _ "github.com/iamolegga/rebus"
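// The blank import above records rebus as a build-time tool dependency in
// go.mod, while the "tools" build tag keeps this file out of ordinary builds;
// this is the conventional Go pattern for pinning tool versions.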
subscription_item_request_builder.go
package item import ( ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go" i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87 "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph" i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph/odataerrors" ) // SubscriptionItemRequestBuilder provides operations to manage the subscriptions property of the microsoft.graph.list entity. type SubscriptionItemRequestBuilder struct { // Path parameters for the request pathParameters map[string]string; // The request adapter to use to execute the requests. requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter; // Url template to use to build the URL for the current request builder urlTemplate string; } // SubscriptionItemRequestBuilderDeleteOptions options for Delete type SubscriptionItemRequestBuilderDeleteOptions struct { // Request headers H map[string]string; // Request options O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption; // Response handler to use in place of the default response handling provided by the core service ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler; } // SubscriptionItemRequestBuilderGetOptions options for Get type SubscriptionItemRequestBuilderGetOptions struct { // Request headers H map[string]string; // Request options O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption; // Request query parameters Q *SubscriptionItemRequestBuilderGetQueryParameters; // Response handler to use in place of the default response handling provided by the core service ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler; } // SubscriptionItemRequestBuilderGetQueryParameters the set of subscriptions on the list. type SubscriptionItemRequestBuilderGetQueryParameters struct { // Expand related entities Expand []string; // Select properties to be returned Select []string; } // SubscriptionItemRequestBuilderPatchOptions options for Patch type SubscriptionItemRequestBuilderPatchOptions struct { // Body i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.Subscriptionable; // Request headers H map[string]string; // Request options O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption; // Response handler to use in place of the default response handling provided by the core service ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler; } // NewSubscriptionItemRequestBuilderInternal instantiates a new SubscriptionItemRequestBuilder and sets the default values. func NewSubscriptionItemRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*SubscriptionItemRequestBuilder) { m := &SubscriptionItemRequestBuilder{ } m.urlTemplate = "{+baseurl}/groups/{group_id}/sites/{site_id}/lists/{list_id}/subscriptions/{subscription_id}{?select,expand}"; urlTplParams := make(map[string]string) for idx, item := range pathParameters { urlTplParams[idx] = item } m.pathParameters = urlTplParams; m.requestAdapter = requestAdapter; return m } // NewSubscriptionItemRequestBuilder instantiates a new SubscriptionItemRequestBuilder and sets the default values. 
func NewSubscriptionItemRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*SubscriptionItemRequestBuilder) {
    urlParams := make(map[string]string)
    urlParams["request-raw-url"] = rawUrl
    return NewSubscriptionItemRequestBuilderInternal(urlParams, requestAdapter)
}
// CreateDeleteRequestInformation delete navigation property subscriptions for groups func (m *SubscriptionItemRequestBuilder) CreateDeleteRequestInformation(options *SubscriptionItemRequestBuilderDeleteOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) { requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.DELETE if options != nil && options.H != nil { requestInfo.Headers = options.H } if options != nil && len(options.O) != 0 { err := requestInfo.AddRequestOptions(options.O...) if err != nil { return nil, err } } return requestInfo, nil } // CreateGetRequestInformation the set of subscriptions on the list. func (m *SubscriptionItemRequestBuilder) CreateGetRequestInformation(options *SubscriptionItemRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) { requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET if options != nil && options.Q != nil { requestInfo.AddQueryParameters(*(options.Q)) } if options != nil && options.H != nil { requestInfo.Headers = options.H } if options != nil && len(options.O) != 0 { err := requestInfo.AddRequestOptions(options.O...) if err != nil { return nil, err } } return requestInfo, nil } // CreatePatchRequestInformation update the navigation property subscriptions in groups func (m *SubscriptionItemRequestBuilder) CreatePatchRequestInformation(options *SubscriptionItemRequestBuilderPatchOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) { requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.PATCH requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", options.Body) if options != nil && options.H != nil { requestInfo.Headers = options.H } if options != nil && len(options.O) != 0 { err := requestInfo.AddRequestOptions(options.O...) if err != nil { return nil, err } } return requestInfo, nil } // Delete delete navigation property subscriptions for groups func (m *SubscriptionItemRequestBuilder) Delete(options *SubscriptionItemRequestBuilderDeleteOptions)(error) { requestInfo, err := m.CreateDeleteRequestInformation(options); if err != nil { return err } errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings { "4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, "5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, } err = m.requestAdapter.SendNoContentAsync(requestInfo, nil, errorMapping) if err != nil { return err } return nil } // Get the set of subscriptions on the list. 
func (m *SubscriptionItemRequestBuilder) Get(options *SubscriptionItemRequestBuilderGetOptions)(i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.Subscriptionable, error) { requestInfo, err := m.CreateGetRequestInformation(options); if err != nil { return nil, err } errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings { "4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, "5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, } res, err := m.requestAdapter.SendAsync(requestInfo, i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.CreateSubscriptionFromDiscriminatorValue, nil, errorMapping) if err != nil { return nil, err } return res.(i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.Subscriptionable), nil } // Patch update the navigation property subscriptions in groups func (m *SubscriptionItemRequestBuilder) Patch(options *SubscriptionItemRequestBuilderPatchOptions)(error) { requestInfo, err := m.CreatePatchRequestInformation(options); if err != nil { return err } errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings { "4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, "5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue, } err = m.requestAdapter.SendNoContentAsync(requestInfo, nil, errorMapping) if err != nil { return err } return nil }
// Body of NewSubscriptionItemRequestBuilder, whose signature is declared above:
// it stores the raw URL under the "request-raw-url" key so the internal
// constructor can treat it as a prebuilt request URL.
{
    urlParams := make(map[string]string)
    urlParams["request-raw-url"] = rawUrl
    return NewSubscriptionItemRequestBuilderInternal(urlParams, requestAdapter)
}
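// ---------------------------------------------------------------------------
// Editor's usage sketch -- not part of the kiota-generated file. It shows how
// the builder above is typically driven: fill in the option struct, let Get
// build the request from the url template and path parameters, and let the
// request adapter map 4XX/5XX responses to ODataError values. The helper name
// fetchListSubscription and the selected property names are hypothetical; the
// option types and Get itself come from the generated code above.
func fetchListSubscription(builder *SubscriptionItemRequestBuilder) (i4a838ef194e4c99e9f2c63ba10dab9cb120a89367c1d4ab0daa63bb424e20d87.Subscriptionable, error) {
    // Select maps to the $select parameter of the url template.
    options := &SubscriptionItemRequestBuilderGetOptions{
        Q: &SubscriptionItemRequestBuilderGetQueryParameters{
            Select: []string{"id", "resource", "expirationDateTime"},
        },
    }
    return builder.Get(options)
}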
volume_server.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 // protoc v3.12.3 // source: volume_server.proto package volume_server_pb import ( context "context" remote_pb "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 type BatchDeleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"` SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck,proto3" json:"skip_cookie_check,omitempty"` } func (x *BatchDeleteRequest) Reset() { *x = BatchDeleteRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *BatchDeleteRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*BatchDeleteRequest) ProtoMessage() {} func (x *BatchDeleteRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use BatchDeleteRequest.ProtoReflect.Descriptor instead. func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{0} } func (x *BatchDeleteRequest) GetFileIds() []string { if x != nil { return x.FileIds } return nil } func (x *BatchDeleteRequest) GetSkipCookieCheck() bool { if x != nil { return x.SkipCookieCheck } return false } type BatchDeleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Results []*DeleteResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } func (x *BatchDeleteResponse) Reset() { *x = BatchDeleteResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *BatchDeleteResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*BatchDeleteResponse) ProtoMessage() {} func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use BatchDeleteResponse.ProtoReflect.Descriptor instead. 
func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{1} } func (x *BatchDeleteResponse) GetResults() []*DeleteResult { if x != nil { return x.Results } return nil } type DeleteResult struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"` Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` Version uint32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` } func (x *DeleteResult) Reset() { *x = DeleteResult{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DeleteResult) String() string { return protoimpl.X.MessageStringOf(x) } func (*DeleteResult) ProtoMessage() {} func (x *DeleteResult) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DeleteResult.ProtoReflect.Descriptor instead. func (*DeleteResult) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{2} } func (x *DeleteResult) GetFileId() string { if x != nil { return x.FileId } return "" } func (x *DeleteResult) GetStatus() int32 { if x != nil { return x.Status } return 0 } func (x *DeleteResult) GetError() string { if x != nil { return x.Error } return "" } func (x *DeleteResult) GetSize() uint32 { if x != nil { return x.Size } return 0 } func (x *DeleteResult) GetVersion() uint32 { if x != nil { return x.Version } return 0 } type Empty struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *Empty) Reset() { *x = Empty{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Empty) String() string { return protoimpl.X.MessageStringOf(x) } func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
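// ---------------------------------------------------------------------------
// Editor's note (illustrative, not generated code): a BatchDeleteRequest
// carries SeaweedFS file ids, and the server answers with one DeleteResult per
// id, using an HTTP-style Status plus an Error string. A commented-out sketch,
// assuming the VolumeServerClient stub provided by the gRPC half of this
// generated file; the file id value is hypothetical:
//
//	resp, err := client.BatchDelete(ctx, &BatchDeleteRequest{
//		FileIds:         []string{"3,01637037d6"}, // hypothetical file id
//		SkipCookieCheck: false,                    // cookies guard against stale ids
//	})
//	if err != nil {
//		return err
//	}
//	for _, r := range resp.GetResults() {
//		if r.GetError() != "" {
//			log.Printf("delete %s failed (status %d): %s", r.GetFileId(), r.GetStatus(), r.GetError())
//		}
//	}
// ---------------------------------------------------------------------------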
func (*Empty) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{3} } type VacuumVolumeCheckRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VacuumVolumeCheckRequest) Reset() { *x = VacuumVolumeCheckRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VacuumVolumeCheckRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VacuumVolumeCheckRequest) ProtoMessage() {} func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VacuumVolumeCheckRequest.ProtoReflect.Descriptor instead. func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{4} } func (x *VacuumVolumeCheckRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VacuumVolumeCheckResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"` } func (x *VacuumVolumeCheckResponse) Reset() { *x = VacuumVolumeCheckResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VacuumVolumeCheckResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VacuumVolumeCheckResponse) ProtoMessage() {} func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VacuumVolumeCheckResponse.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{5} } func (x *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { if x != nil { return x.GarbageRatio } return 0 } type VacuumVolumeCompactRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Preallocate int64 `protobuf:"varint,2,opt,name=preallocate,proto3" json:"preallocate,omitempty"` } func (x *VacuumVolumeCompactRequest) Reset() { *x = VacuumVolumeCompactRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VacuumVolumeCompactRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VacuumVolumeCompactRequest) ProtoMessage() {} func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VacuumVolumeCompactRequest.ProtoReflect.Descriptor instead. func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{6} } func (x *VacuumVolumeCompactRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VacuumVolumeCompactRequest) GetPreallocate() int64 { if x != nil { return x.Preallocate } return 0 } type VacuumVolumeCompactResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields ProcessedBytes int64 `protobuf:"varint,1,opt,name=processed_bytes,json=processedBytes,proto3" json:"processed_bytes,omitempty"` } func (x *VacuumVolumeCompactResponse) Reset() { *x = VacuumVolumeCompactResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VacuumVolumeCompactResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VacuumVolumeCompactResponse) ProtoMessage() {} func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VacuumVolumeCompactResponse.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{7} } func (x *VacuumVolumeCompactResponse) GetProcessedBytes() int64 { if x != nil { return x.ProcessedBytes } return 0 } type VacuumVolumeCommitRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VacuumVolumeCommitRequest) Reset() { *x = VacuumVolumeCommitRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VacuumVolumeCommitRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VacuumVolumeCommitRequest) ProtoMessage() {} func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VacuumVolumeCommitRequest.ProtoReflect.Descriptor instead. func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{8} } func (x *VacuumVolumeCommitRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VacuumVolumeCommitResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` } func (x *VacuumVolumeCommitResponse) Reset() { *x = VacuumVolumeCommitResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VacuumVolumeCommitResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VacuumVolumeCommitResponse) ProtoMessage() {} func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VacuumVolumeCommitResponse.ProtoReflect.Descriptor instead. 
func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{9} } func (x *VacuumVolumeCommitResponse) GetIsReadOnly() bool { if x != nil { return x.IsReadOnly } return false } type VacuumVolumeCleanupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VacuumVolumeCleanupRequest) Reset() { *x = VacuumVolumeCleanupRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VacuumVolumeCleanupRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VacuumVolumeCleanupRequest) ProtoMessage() {} func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VacuumVolumeCleanupRequest.ProtoReflect.Descriptor instead. func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{10} } func (x *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VacuumVolumeCleanupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VacuumVolumeCleanupResponse) Reset() { *x = VacuumVolumeCleanupResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VacuumVolumeCleanupResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VacuumVolumeCleanupResponse) ProtoMessage() {} func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VacuumVolumeCleanupResponse.ProtoReflect.Descriptor instead. func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{11} } type DeleteCollectionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *DeleteCollectionRequest) Reset() { *x = DeleteCollectionRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DeleteCollectionRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*DeleteCollectionRequest) ProtoMessage() {} func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead. 
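// ---------------------------------------------------------------------------
// Editor's note (illustrative, not generated code): the VacuumVolume* message
// pairs above form one compaction workflow -- Check measures the garbage
// ratio, Compact rewrites the volume, Commit swaps the compacted copy in, and
// Cleanup removes leftovers. A commented-out sketch, assuming VolumeServerClient
// methods of the same names; the 0.3 threshold is illustrative:
//
//	check, err := client.VacuumVolumeCheck(ctx, &VacuumVolumeCheckRequest{VolumeId: vid})
//	if err != nil {
//		return err
//	}
//	if check.GetGarbageRatio() > 0.3 {
//		// VacuumVolumeCompact runs here; depending on the proto version it is
//		// unary or streams VacuumVolumeCompactResponse progress (ProcessedBytes).
//		if _, err := client.VacuumVolumeCommit(ctx, &VacuumVolumeCommitRequest{VolumeId: vid}); err != nil {
//			return err
//		}
//		_, err = client.VacuumVolumeCleanup(ctx, &VacuumVolumeCleanupRequest{VolumeId: vid})
//		return err
//	}
// ---------------------------------------------------------------------------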
func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{12} } func (x *DeleteCollectionRequest) GetCollection() string { if x != nil { return x.Collection } return "" } type DeleteCollectionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *DeleteCollectionResponse) Reset() { *x = DeleteCollectionResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DeleteCollectionResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*DeleteCollectionResponse) ProtoMessage() {} func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead. func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{13} } type AllocateVolumeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` Preallocate int64 `protobuf:"varint,3,opt,name=preallocate,proto3" json:"preallocate,omitempty"` Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *AllocateVolumeRequest) Reset() { *x = AllocateVolumeRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *AllocateVolumeRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*AllocateVolumeRequest) ProtoMessage() {} func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use AllocateVolumeRequest.ProtoReflect.Descriptor instead. 
func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{14} } func (x *AllocateVolumeRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *AllocateVolumeRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *AllocateVolumeRequest) GetPreallocate() int64 { if x != nil { return x.Preallocate } return 0 } func (x *AllocateVolumeRequest) GetReplication() string { if x != nil { return x.Replication } return "" } func (x *AllocateVolumeRequest) GetTtl() string { if x != nil { return x.Ttl } return "" } func (x *AllocateVolumeRequest) GetMemoryMapMaxSizeMb() uint32 { if x != nil { return x.MemoryMapMaxSizeMb } return 0 } func (x *AllocateVolumeRequest) GetDiskType() string { if x != nil { return x.DiskType } return "" } type AllocateVolumeResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *AllocateVolumeResponse) Reset() { *x = AllocateVolumeResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *AllocateVolumeResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*AllocateVolumeResponse) ProtoMessage() {} func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use AllocateVolumeResponse.ProtoReflect.Descriptor instead. func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{15} } type VolumeSyncStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeSyncStatusRequest) Reset() { *x = VolumeSyncStatusRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeSyncStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeSyncStatusRequest) ProtoMessage() {} func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeSyncStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{16} } func (x *VolumeSyncStatusRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VolumeSyncStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset,proto3" json:"tail_offset,omitempty"` CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` } func (x *VolumeSyncStatusResponse) Reset() { *x = VolumeSyncStatusResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeSyncStatusResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeSyncStatusResponse) ProtoMessage() {} func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeSyncStatusResponse.ProtoReflect.Descriptor instead. 
func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{17} } func (x *VolumeSyncStatusResponse) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeSyncStatusResponse) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *VolumeSyncStatusResponse) GetReplication() string { if x != nil { return x.Replication } return "" } func (x *VolumeSyncStatusResponse) GetTtl() string { if x != nil { return x.Ttl } return "" } func (x *VolumeSyncStatusResponse) GetTailOffset() uint64 { if x != nil { return x.TailOffset } return 0 } func (x *VolumeSyncStatusResponse) GetCompactRevision() uint32 { if x != nil { return x.CompactRevision } return 0 } func (x *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { if x != nil { return x.IdxFileSize } return 0 } type VolumeIncrementalCopyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` } func (x *VolumeIncrementalCopyRequest) Reset() { *x = VolumeIncrementalCopyRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeIncrementalCopyRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeIncrementalCopyRequest) ProtoMessage() {} func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeIncrementalCopyRequest.ProtoReflect.Descriptor instead. func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{18} } func (x *VolumeIncrementalCopyRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeIncrementalCopyRequest) GetSinceNs() uint64 { if x != nil { return x.SinceNs } return 0 } type VolumeIncrementalCopyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` } func (x *VolumeIncrementalCopyResponse) Reset() { *x = VolumeIncrementalCopyResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeIncrementalCopyResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeIncrementalCopyResponse) ProtoMessage() {} func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeIncrementalCopyResponse.ProtoReflect.Descriptor instead. 
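// ---------------------------------------------------------------------------
// Editor's note (illustrative, not generated code): VolumeIncrementalCopy lets
// a replica catch up -- the server streams back the volume data appended since
// SinceNs as raw FileContent chunks. A commented-out sketch, assuming the
// server-streaming client stub that protoc generates for this RPC:
//
//	stream, err := client.VolumeIncrementalCopy(ctx, &VolumeIncrementalCopyRequest{
//		VolumeId: vid,
//		SinceNs:  lastAppendAtNs, // resume point, e.g. from VolumeCopyResponse.LastAppendAtNs
//	})
//	if err != nil {
//		return err
//	}
//	for {
//		chunk, err := stream.Recv()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		if _, err := dst.Write(chunk.GetFileContent()); err != nil {
//			return err
//		}
//	}
// ---------------------------------------------------------------------------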
func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{19} } func (x *VolumeIncrementalCopyResponse) GetFileContent() []byte { if x != nil { return x.FileContent } return nil } type VolumeMountRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeMountRequest) Reset() { *x = VolumeMountRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeMountRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeMountRequest) ProtoMessage() {} func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeMountRequest.ProtoReflect.Descriptor instead. func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{20} } func (x *VolumeMountRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VolumeMountResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeMountResponse) Reset() { *x = VolumeMountResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeMountResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeMountResponse) ProtoMessage() {} func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeMountResponse.ProtoReflect.Descriptor instead. func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{21} } type VolumeUnmountRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeUnmountRequest) Reset() { *x = VolumeUnmountRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeUnmountRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeUnmountRequest) ProtoMessage() {} func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeUnmountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{22} } func (x *VolumeUnmountRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VolumeUnmountResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeUnmountResponse) Reset() { *x = VolumeUnmountResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeUnmountResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeUnmountResponse) ProtoMessage() {} func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeUnmountResponse.ProtoReflect.Descriptor instead. func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{23} } type VolumeDeleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeDeleteRequest) Reset() { *x = VolumeDeleteRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeDeleteRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeDeleteRequest) ProtoMessage() {} func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeDeleteRequest.ProtoReflect.Descriptor instead. func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{24} } func (x *VolumeDeleteRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VolumeDeleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeDeleteResponse) Reset() { *x = VolumeDeleteResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeDeleteResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeDeleteResponse) ProtoMessage() {} func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeDeleteResponse.ProtoReflect.Descriptor instead. 
func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{25} } type VolumeMarkReadonlyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeMarkReadonlyRequest) Reset() { *x = VolumeMarkReadonlyRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeMarkReadonlyRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeMarkReadonlyRequest) ProtoMessage() {} func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead. func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{26} } func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VolumeMarkReadonlyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeMarkReadonlyResponse) Reset() { *x = VolumeMarkReadonlyResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeMarkReadonlyResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeMarkReadonlyResponse) ProtoMessage() {} func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead. func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{27} } type VolumeMarkWritableRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeMarkWritableRequest) Reset() { *x = VolumeMarkWritableRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeMarkWritableRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeMarkWritableRequest) ProtoMessage() {} func (x *VolumeMarkWritableRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeMarkWritableRequest.ProtoReflect.Descriptor instead. 
func (*VolumeMarkWritableRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{28} } func (x *VolumeMarkWritableRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VolumeMarkWritableResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeMarkWritableResponse) Reset() { *x = VolumeMarkWritableResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeMarkWritableResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeMarkWritableResponse) ProtoMessage() {} func (x *VolumeMarkWritableResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeMarkWritableResponse.ProtoReflect.Descriptor instead. func (*VolumeMarkWritableResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{29} } type VolumeConfigureRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` } func (x *VolumeConfigureRequest) Reset() { *x = VolumeConfigureRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeConfigureRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeConfigureRequest) ProtoMessage() {} func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeConfigureRequest.ProtoReflect.Descriptor instead. 
func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{30} } func (x *VolumeConfigureRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeConfigureRequest) GetReplication() string { if x != nil { return x.Replication } return "" } type VolumeConfigureResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (x *VolumeConfigureResponse) Reset() { *x = VolumeConfigureResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeConfigureResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeConfigureResponse) ProtoMessage() {} func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeConfigureResponse.ProtoReflect.Descriptor instead. func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{31} } func (x *VolumeConfigureResponse) GetError() string { if x != nil { return x.Error } return "" } type VolumeStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeStatusRequest) Reset() { *x = VolumeStatusRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeStatusRequest) ProtoMessage() {} func (x *VolumeStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeStatusRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{32} } func (x *VolumeStatusRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type VolumeStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` } func (x *VolumeStatusResponse) Reset() { *x = VolumeStatusResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeStatusResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeStatusResponse) ProtoMessage() {} func (x *VolumeStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeStatusResponse.ProtoReflect.Descriptor instead. func (*VolumeStatusResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{33} } func (x *VolumeStatusResponse) GetIsReadOnly() bool { if x != nil { return x.IsReadOnly } return false } type VolumeCopyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` DiskType string `protobuf:"bytes,6,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *VolumeCopyRequest) Reset() { *x = VolumeCopyRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeCopyRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeCopyRequest) ProtoMessage() {} func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeCopyRequest.ProtoReflect.Descriptor instead. 
func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{34} } func (x *VolumeCopyRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeCopyRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *VolumeCopyRequest) GetReplication() string { if x != nil { return x.Replication } return "" } func (x *VolumeCopyRequest) GetTtl() string { if x != nil { return x.Ttl } return "" } func (x *VolumeCopyRequest) GetSourceDataNode() string { if x != nil { return x.SourceDataNode } return "" } func (x *VolumeCopyRequest) GetDiskType() string { if x != nil { return x.DiskType } return "" } type VolumeCopyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs,proto3" json:"last_append_at_ns,omitempty"` ProcessedBytes int64 `protobuf:"varint,2,opt,name=processed_bytes,json=processedBytes,proto3" json:"processed_bytes,omitempty"` } func (x *VolumeCopyResponse) Reset() { *x = VolumeCopyResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeCopyResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeCopyResponse) ProtoMessage() {} func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeCopyResponse.ProtoReflect.Descriptor instead. 
func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{35} } func (x *VolumeCopyResponse) GetLastAppendAtNs() uint64 { if x != nil { return x.LastAppendAtNs } return 0 } func (x *VolumeCopyResponse) GetProcessedBytes() int64 { if x != nil { return x.ProcessedBytes } return 0 } type CopyFileRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"` CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset,proto3" json:"stop_offset,omitempty"` Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"` IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"` IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound,proto3" json:"ignore_source_file_not_found,omitempty"` } func (x *CopyFileRequest) Reset() { *x = CopyFileRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CopyFileRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*CopyFileRequest) ProtoMessage() {} func (x *CopyFileRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CopyFileRequest.ProtoReflect.Descriptor instead. 
func (*CopyFileRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{36} } func (x *CopyFileRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *CopyFileRequest) GetExt() string { if x != nil { return x.Ext } return "" } func (x *CopyFileRequest) GetCompactionRevision() uint32 { if x != nil { return x.CompactionRevision } return 0 } func (x *CopyFileRequest) GetStopOffset() uint64 { if x != nil { return x.StopOffset } return 0 } func (x *CopyFileRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *CopyFileRequest) GetIsEcVolume() bool { if x != nil { return x.IsEcVolume } return false } func (x *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { if x != nil { return x.IgnoreSourceFileNotFound } return false } type CopyFileResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` ModifiedTsNs int64 `protobuf:"varint,2,opt,name=modified_ts_ns,json=modifiedTsNs,proto3" json:"modified_ts_ns,omitempty"` } func (x *CopyFileResponse) Reset() { *x = CopyFileResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CopyFileResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*CopyFileResponse) ProtoMessage() {} func (x *CopyFileResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CopyFileResponse.ProtoReflect.Descriptor instead. func (*CopyFileResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{37} } func (x *CopyFileResponse) GetFileContent() []byte { if x != nil { return x.FileContent } return nil } func (x *CopyFileResponse) GetModifiedTsNs() int64 { if x != nil { return x.ModifiedTsNs } return 0 } type ReadNeedleBlobRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // actual offset Size int32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` } func (x *ReadNeedleBlobRequest) Reset() { *x = ReadNeedleBlobRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ReadNeedleBlobRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ReadNeedleBlobRequest) ProtoMessage() {} func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ReadNeedleBlobRequest.ProtoReflect.Descriptor instead. 
func (*ReadNeedleBlobRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{38} } func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *ReadNeedleBlobRequest) GetNeedleId() uint64 { if x != nil { return x.NeedleId } return 0 } func (x *ReadNeedleBlobRequest) GetOffset() int64 { if x != nil { return x.Offset } return 0 } func (x *ReadNeedleBlobRequest) GetSize() int32 { if x != nil { return x.Size } return 0 } type ReadNeedleBlobResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields NeedleBlob []byte `protobuf:"bytes,1,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` } func (x *ReadNeedleBlobResponse) Reset() { *x = ReadNeedleBlobResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ReadNeedleBlobResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ReadNeedleBlobResponse) ProtoMessage() {} func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ReadNeedleBlobResponse.ProtoReflect.Descriptor instead. func (*ReadNeedleBlobResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{39} } func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte { if x != nil { return x.NeedleBlob } return nil } type WriteNeedleBlobRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` NeedleBlob []byte `protobuf:"bytes,4,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` } func (x *WriteNeedleBlobRequest) Reset() { *x = WriteNeedleBlobRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *WriteNeedleBlobRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*WriteNeedleBlobRequest) ProtoMessage() {} func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use WriteNeedleBlobRequest.ProtoReflect.Descriptor instead. 
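// ---------------------------------------------------------------------------
// Editor's note (illustrative, not generated code): ReadNeedleBlob and
// WriteNeedleBlob move a needle's on-disk bytes verbatim, so pairing the two
// can relocate a needle between volume servers without decoding it. A
// commented-out sketch, assuming unary client stubs for both RPCs; vid, nid,
// off and size stand in for values taken from an index entry:
//
//	rd, err := src.ReadNeedleBlob(ctx, &ReadNeedleBlobRequest{
//		VolumeId: vid, NeedleId: nid, Offset: off, Size: size,
//	})
//	if err != nil {
//		return err
//	}
//	_, err = dst.WriteNeedleBlob(ctx, &WriteNeedleBlobRequest{
//		VolumeId: vid, NeedleId: nid, Size: size, NeedleBlob: rd.GetNeedleBlob(),
//	})
//	return err
// ---------------------------------------------------------------------------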
func (*WriteNeedleBlobRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{40} } func (x *WriteNeedleBlobRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *WriteNeedleBlobRequest) GetNeedleId() uint64 { if x != nil { return x.NeedleId } return 0 } func (x *WriteNeedleBlobRequest) GetSize() int32 { if x != nil { return x.Size } return 0 } func (x *WriteNeedleBlobRequest) GetNeedleBlob() []byte { if x != nil { return x.NeedleBlob } return nil } type WriteNeedleBlobResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *WriteNeedleBlobResponse) Reset() { *x = WriteNeedleBlobResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *WriteNeedleBlobResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*WriteNeedleBlobResponse) ProtoMessage() {} func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use WriteNeedleBlobResponse.ProtoReflect.Descriptor instead. func (*WriteNeedleBlobResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{41} } type ReadAllNeedlesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeIds []uint32 `protobuf:"varint,1,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` } func (x *ReadAllNeedlesRequest) Reset() { *x = ReadAllNeedlesRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ReadAllNeedlesRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ReadAllNeedlesRequest) ProtoMessage() {} func (x *ReadAllNeedlesRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ReadAllNeedlesRequest.ProtoReflect.Descriptor instead. 
func (*ReadAllNeedlesRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{42} } func (x *ReadAllNeedlesRequest) GetVolumeIds() []uint32 { if x != nil { return x.VolumeIds } return nil } type ReadAllNeedlesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` Cookie uint32 `protobuf:"varint,3,opt,name=cookie,proto3" json:"cookie,omitempty"` NeedleBlob []byte `protobuf:"bytes,5,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` } func (x *ReadAllNeedlesResponse) Reset() { *x = ReadAllNeedlesResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ReadAllNeedlesResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ReadAllNeedlesResponse) ProtoMessage() {} func (x *ReadAllNeedlesResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ReadAllNeedlesResponse.ProtoReflect.Descriptor instead. func (*ReadAllNeedlesResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{43} } func (x *ReadAllNeedlesResponse) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *ReadAllNeedlesResponse) GetNeedleId() uint64 { if x != nil { return x.NeedleId } return 0 } func (x *ReadAllNeedlesResponse) GetCookie() uint32 { if x != nil { return x.Cookie } return 0 } func (x *ReadAllNeedlesResponse) GetNeedleBlob() []byte { if x != nil { return x.NeedleBlob } return nil } type VolumeTailSenderRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` } func (x *VolumeTailSenderRequest) Reset() { *x = VolumeTailSenderRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeTailSenderRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeTailSenderRequest) ProtoMessage() {} func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead. 
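// Illustrative sketch, not part of the generated code: VolumeTailSenderRequest
// parameterizes tailing a volume for new writes. Assuming a server-streaming
// VolumeTailSender RPC exists (names and values below are hypothetical):
//
//	stream, err := client.VolumeTailSender(ctx, &VolumeTailSenderRequest{
//		VolumeId:           7,
//		SinceNs:            uint64(time.Now().Add(-time.Minute).UnixNano()),
//		IdleTimeoutSeconds: 30, // give up after 30s without new writes
//	})
//
// Each VolumeTailSenderResponse below then carries a needle header/body chunk,
// with IsLastChunk marking the final piece of a needle.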
func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{44} } func (x *VolumeTailSenderRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeTailSenderRequest) GetSinceNs() uint64 { if x != nil { return x.SinceNs } return 0 } func (x *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 { if x != nil { return x.IdleTimeoutSeconds } return 0 } type VolumeTailSenderResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"` NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"` IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk,proto3" json:"is_last_chunk,omitempty"` } func (x *VolumeTailSenderResponse) Reset() { *x = VolumeTailSenderResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeTailSenderResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeTailSenderResponse) ProtoMessage() {} func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead. func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{45} } func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte { if x != nil { return x.NeedleHeader } return nil } func (x *VolumeTailSenderResponse) GetNeedleBody() []byte { if x != nil { return x.NeedleBody } return nil } func (x *VolumeTailSenderResponse) GetIsLastChunk() bool { if x != nil { return x.IsLastChunk } return false } type VolumeTailReceiverRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer,proto3" json:"source_volume_server,omitempty"` } func (x *VolumeTailReceiverRequest) Reset() { *x = VolumeTailReceiverRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeTailReceiverRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeTailReceiverRequest) ProtoMessage() {} func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead.
func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{46} } func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeTailReceiverRequest) GetSinceNs() uint64 { if x != nil { return x.SinceNs } return 0 } func (x *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 { if x != nil { return x.IdleTimeoutSeconds } return 0 } func (x *VolumeTailReceiverRequest) GetSourceVolumeServer() string { if x != nil { return x.SourceVolumeServer } return "" } type VolumeTailReceiverResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeTailReceiverResponse) Reset() { *x = VolumeTailReceiverResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeTailReceiverResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeTailReceiverResponse) ProtoMessage() {} func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead. func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{47} } type VolumeEcShardsGenerateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *VolumeEcShardsGenerateRequest) Reset() { *x = VolumeEcShardsGenerateRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsGenerateRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead.
func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{48} } func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcShardsGenerateRequest) GetCollection() string { if x != nil { return x.Collection } return "" } type VolumeEcShardsGenerateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsGenerateResponse) Reset() { *x = VolumeEcShardsGenerateResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsGenerateResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{49} } type VolumeEcShardsRebuildRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *VolumeEcShardsRebuildRequest) Reset() { *x = VolumeEcShardsRebuildRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsRebuildRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{50} } func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcShardsRebuildRequest) GetCollection() string { if x != nil { return x.Collection } return "" } type VolumeEcShardsRebuildResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds,proto3" json:"rebuilt_shard_ids,omitempty"` } func (x *VolumeEcShardsRebuildResponse) Reset() { *x = VolumeEcShardsRebuildResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsRebuildResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{51} } func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { if x != nil { return x.RebuiltShardIds } return nil } type VolumeEcShardsCopyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile,proto3" json:"copy_ecx_file,omitempty"` SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"` CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"` } func (x *VolumeEcShardsCopyRequest) Reset() { *x = VolumeEcShardsCopyRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsCopyRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsCopyRequest) ProtoMessage() {} func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead. 
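// Illustrative sketch, not part of the generated code: to pull a subset of
// erasure-coded shards (plus shared index files) from another volume server,
// a caller might build the following request (all values are placeholders):
//
//	req := &VolumeEcShardsCopyRequest{
//		VolumeId:       7,
//		Collection:     "pics",
//		ShardIds:       []uint32{0, 1, 2},
//		CopyEcxFile:    true, // also fetch the shared .ecx index file
//		CopyVifFile:    true, // also fetch the volume info file
//		SourceDataNode: "10.0.0.5:8080",
//	}
//
// CopyEcjFile would additionally fetch the .ecj file; the exact file roles are
// inferred from the field names here, not stated by this generated code.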
func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{52} } func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcShardsCopyRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *VolumeEcShardsCopyRequest) GetShardIds() []uint32 { if x != nil { return x.ShardIds } return nil } func (x *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool { if x != nil { return x.CopyEcxFile } return false } func (x *VolumeEcShardsCopyRequest) GetSourceDataNode() string { if x != nil { return x.SourceDataNode } return "" } func (x *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool { if x != nil { return x.CopyEcjFile } return false } func (x *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { if x != nil { return x.CopyVifFile } return false } type VolumeEcShardsCopyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsCopyResponse) Reset() { *x = VolumeEcShardsCopyResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsCopyResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsCopyResponse) ProtoMessage() {} func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{53} } type VolumeEcShardsDeleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` } func (x *VolumeEcShardsDeleteRequest) Reset() { *x = VolumeEcShardsDeleteRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsDeleteRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{54} } func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcShardsDeleteRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 { if x != nil { return x.ShardIds } return nil } type VolumeEcShardsDeleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsDeleteResponse) Reset() { *x = VolumeEcShardsDeleteResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsDeleteResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{55} } type VolumeEcShardsMountRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` } func (x *VolumeEcShardsMountRequest) Reset() { *x = VolumeEcShardsMountRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsMountRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsMountRequest) ProtoMessage() {} func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{56} } func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcShardsMountRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *VolumeEcShardsMountRequest) GetShardIds() []uint32 { if x != nil { return x.ShardIds } return nil } type VolumeEcShardsMountResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsMountResponse) Reset() { *x = VolumeEcShardsMountResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsMountResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsMountResponse) ProtoMessage() {} func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{57} } type VolumeEcShardsUnmountRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` } func (x *VolumeEcShardsUnmountRequest) Reset() { *x = VolumeEcShardsUnmountRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsUnmountRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{58} } func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 { if x != nil { return x.ShardIds } return nil } type VolumeEcShardsUnmountResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsUnmountResponse) Reset() { *x = VolumeEcShardsUnmountResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsUnmountResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{59} } type VolumeEcShardReadRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` } func (x *VolumeEcShardReadRequest) Reset() { *x = VolumeEcShardReadRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardReadRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardReadRequest) ProtoMessage() {} func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{60} } func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcShardReadRequest) GetShardId() uint32 { if x != nil { return x.ShardId } return 0 } func (x *VolumeEcShardReadRequest) GetOffset() int64 { if x != nil { return x.Offset } return 0 } func (x *VolumeEcShardReadRequest) GetSize() int64 { if x != nil { return x.Size } return 0 } func (x *VolumeEcShardReadRequest) GetFileKey() uint64 { if x != nil { return x.FileKey } return 0 } type VolumeEcShardReadResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted,proto3" json:"is_deleted,omitempty"` } func (x *VolumeEcShardReadResponse) Reset() { *x = VolumeEcShardReadResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardReadResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardReadResponse) ProtoMessage() {} func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{61} } func (x *VolumeEcShardReadResponse) GetData() []byte { if x != nil { return x.Data } return nil } func (x *VolumeEcShardReadResponse) GetIsDeleted() bool { if x != nil { return x.IsDeleted } return false } type VolumeEcBlobDeleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` } func (x *VolumeEcBlobDeleteRequest) Reset() { *x = VolumeEcBlobDeleteRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcBlobDeleteRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead. 
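// Illustrative sketch, not part of the generated code: reading a byte range
// from a single erasure-coded shard, assuming a streaming VolumeEcShardRead
// RPC (names and values below are hypothetical):
//
//	stream, err := client.VolumeEcShardRead(ctx, &VolumeEcShardReadRequest{
//		VolumeId: 7, ShardId: 3, Offset: 0, Size: 1 << 20, FileKey: 0x2b1f,
//	})
//
// The IsDeleted flag on the response lets a caller distinguish a tombstoned
// needle from live data without inspecting the returned bytes.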
func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{62} } func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcBlobDeleteRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *VolumeEcBlobDeleteRequest) GetFileKey() uint64 { if x != nil { return x.FileKey } return 0 } func (x *VolumeEcBlobDeleteRequest) GetVersion() uint32 { if x != nil { return x.Version } return 0 } type VolumeEcBlobDeleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeEcBlobDeleteResponse) Reset() { *x = VolumeEcBlobDeleteResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcBlobDeleteResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead. func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{63} } type VolumeEcShardsToVolumeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *VolumeEcShardsToVolumeRequest) Reset() { *x = VolumeEcShardsToVolumeRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsToVolumeRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead. 
func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{64} } func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeEcShardsToVolumeRequest) GetCollection() string { if x != nil { return x.Collection } return "" } type VolumeEcShardsToVolumeResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsToVolumeResponse) Reset() { *x = VolumeEcShardsToVolumeResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeEcShardsToVolumeResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{65} } type ReadVolumeFileStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *ReadVolumeFileStatusRequest) Reset() { *x = ReadVolumeFileStatusRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ReadVolumeFileStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ReadVolumeFileStatusRequest) ProtoMessage() {} func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead. 
func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{66} } func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } type ReadVolumeFileStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds,proto3" json:"idx_file_timestamp_seconds,omitempty"` IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds,proto3" json:"dat_file_timestamp_seconds,omitempty"` DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` Collection string `protobuf:"bytes,8,opt,name=collection,proto3" json:"collection,omitempty"` DiskType string `protobuf:"bytes,9,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *ReadVolumeFileStatusResponse) Reset() { *x = ReadVolumeFileStatusResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ReadVolumeFileStatusResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ReadVolumeFileStatusResponse) ProtoMessage() {} func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead. 
func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{67} } func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 { if x != nil { return x.IdxFileTimestampSeconds } return 0 } func (x *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 { if x != nil { return x.IdxFileSize } return 0 } func (x *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 { if x != nil { return x.DatFileTimestampSeconds } return 0 } func (x *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { if x != nil { return x.DatFileSize } return 0 } func (x *ReadVolumeFileStatusResponse) GetFileCount() uint64 { if x != nil { return x.FileCount } return 0 } func (x *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 { if x != nil { return x.CompactionRevision } return 0 } func (x *ReadVolumeFileStatusResponse) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *ReadVolumeFileStatusResponse) GetDiskType() string { if x != nil { return x.DiskType } return "" } type DiskStatus struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"` PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"` DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *DiskStatus) Reset() { *x = DiskStatus{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DiskStatus) String() string { return protoimpl.X.MessageStringOf(x) } func (*DiskStatus) ProtoMessage() {} func (x *DiskStatus) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead. 
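// Illustrative note, not part of the generated code: DiskStatus carries both
// raw counters and derived percentages. If only the counters were populated,
// the percentages could be recomputed (assuming All/Used/Free are byte counts
// for the same filesystem):
//
//	if ds.GetAll() > 0 {
//		ds.PercentUsed = float32(ds.GetUsed()) / float32(ds.GetAll()) * 100
//		ds.PercentFree = float32(ds.GetFree()) / float32(ds.GetAll()) * 100
//	}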
func (*DiskStatus) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{68} } func (x *DiskStatus) GetDir() string { if x != nil { return x.Dir } return "" } func (x *DiskStatus) GetAll() uint64 { if x != nil { return x.All } return 0 } func (x *DiskStatus) GetUsed() uint64 { if x != nil { return x.Used } return 0 } func (x *DiskStatus) GetFree() uint64 { if x != nil { return x.Free } return 0 } func (x *DiskStatus) GetPercentFree() float32 { if x != nil { return x.PercentFree } return 0 } func (x *DiskStatus) GetPercentUsed() float32 { if x != nil { return x.PercentUsed } return 0 } func (x *DiskStatus) GetDiskType() string { if x != nil { return x.DiskType } return "" } type MemStatus struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Goroutines int32 `protobuf:"varint,1,opt,name=goroutines,proto3" json:"goroutines,omitempty"` All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` Self uint64 `protobuf:"varint,5,opt,name=self,proto3" json:"self,omitempty"` Heap uint64 `protobuf:"varint,6,opt,name=heap,proto3" json:"heap,omitempty"` Stack uint64 `protobuf:"varint,7,opt,name=stack,proto3" json:"stack,omitempty"` } func (x *MemStatus) Reset() { *x = MemStatus{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *MemStatus) String() string { return protoimpl.X.MessageStringOf(x) } func (*MemStatus) ProtoMessage() {} func (x *MemStatus) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use MemStatus.ProtoReflect.Descriptor instead. 
func (*MemStatus) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{69} } func (x *MemStatus) GetGoroutines() int32 { if x != nil { return x.Goroutines } return 0 } func (x *MemStatus) GetAll() uint64 { if x != nil { return x.All } return 0 } func (x *MemStatus) GetUsed() uint64 { if x != nil { return x.Used } return 0 } func (x *MemStatus) GetFree() uint64 { if x != nil { return x.Free } return 0 } func (x *MemStatus) GetSelf() uint64 { if x != nil { return x.Self } return 0 } func (x *MemStatus) GetHeap() uint64 { if x != nil { return x.Heap } return 0 } func (x *MemStatus) GetStack() uint64 { if x != nil { return x.Stack } return 0 } // tiered storage on volume servers type RemoteFile struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType,proto3" json:"backend_type,omitempty"` BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"` Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"` Extension string `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"` } func (x *RemoteFile) Reset() { *x = RemoteFile{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *RemoteFile) String() string { return protoimpl.X.MessageStringOf(x) } func (*RemoteFile) ProtoMessage() {} func (x *RemoteFile) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead.
func (*RemoteFile) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{70} } func (x *RemoteFile) GetBackendType() string { if x != nil { return x.BackendType } return "" } func (x *RemoteFile) GetBackendId() string { if x != nil { return x.BackendId } return "" } func (x *RemoteFile) GetKey() string { if x != nil { return x.Key } return "" } func (x *RemoteFile) GetOffset() uint64 { if x != nil { return x.Offset } return 0 } func (x *RemoteFile) GetFileSize() uint64 { if x != nil { return x.FileSize } return 0 } func (x *RemoteFile) GetModifiedTime() uint64 { if x != nil { return x.ModifiedTime } return 0 } func (x *RemoteFile) GetExtension() string { if x != nil { return x.Extension } return "" } type VolumeInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` } func (x *VolumeInfo) Reset() { *x = VolumeInfo{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeInfo) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeInfo) ProtoMessage() {} func (x *VolumeInfo) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead. 
func (*VolumeInfo) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{71} } func (x *VolumeInfo) GetFiles() []*RemoteFile { if x != nil { return x.Files } return nil } func (x *VolumeInfo) GetVersion() uint32 { if x != nil { return x.Version } return 0 } func (x *VolumeInfo) GetReplication() string { if x != nil { return x.Replication } return "" } // tiered storage type VolumeTierMoveDatToRemoteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName,proto3" json:"destination_backend_name,omitempty"` KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile,proto3" json:"keep_local_dat_file,omitempty"` } func (x *VolumeTierMoveDatToRemoteRequest) Reset() { *x = VolumeTierMoveDatToRemoteRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeTierMoveDatToRemoteRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead. 
func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{72} } func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeTierMoveDatToRemoteRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string { if x != nil { return x.DestinationBackendName } return "" } func (x *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool { if x != nil { return x.KeepLocalDatFile } return false } type VolumeTierMoveDatToRemoteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` } func (x *VolumeTierMoveDatToRemoteResponse) Reset() { *x = VolumeTierMoveDatToRemoteResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeTierMoveDatToRemoteResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{73} } func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { if x != nil { return x.Processed } return 0 } func (x *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 { if x != nil { return x.ProcessedPercentage } return 0 } type VolumeTierMoveDatFromRemoteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile,proto3" json:"keep_remote_dat_file,omitempty"` } func (x *VolumeTierMoveDatFromRemoteRequest) Reset() { *x = VolumeTierMoveDatFromRemoteRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeTierMoveDatFromRemoteRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead. 
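// Illustrative sketch, not part of the generated code: both tier-move RPCs
// report incremental progress. Assuming VolumeTierMoveDatToRemote is a
// server-streaming RPC (client/stream names below are hypothetical):
//
//	for {
//		resp, err := stream.Recv()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		log.Printf("moved %d bytes (%.1f%%)", resp.GetProcessed(), resp.GetProcessedPercentage())
//	}
//
// Processed reads as a running byte count and ProcessedPercentage as a 0-100
// figure; treat both as progress hints rather than exact accounting.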
func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{74} } func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeTierMoveDatFromRemoteRequest) GetCollection() string { if x != nil { return x.Collection } return "" } func (x *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool { if x != nil { return x.KeepRemoteDatFile } return false } type VolumeTierMoveDatFromRemoteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` } func (x *VolumeTierMoveDatFromRemoteResponse) Reset() { *x = VolumeTierMoveDatFromRemoteResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeTierMoveDatFromRemoteResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{75} } func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { if x != nil { return x.Processed } return 0 } func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { if x != nil { return x.ProcessedPercentage } return 0 } type VolumeServerStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeServerStatusRequest) Reset() { *x = VolumeServerStatusRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeServerStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeServerStatusRequest) ProtoMessage() {} func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead. 
func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{76} } type VolumeServerStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses,proto3" json:"disk_statuses,omitempty"` MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus,proto3" json:"memory_status,omitempty"` } func (x *VolumeServerStatusResponse) Reset() { *x = VolumeServerStatusResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeServerStatusResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeServerStatusResponse) ProtoMessage() {} func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead. func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{77} } func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { if x != nil { return x.DiskStatuses } return nil } func (x *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus { if x != nil { return x.MemoryStatus } return nil } type VolumeServerLeaveRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeServerLeaveRequest) Reset() { *x = VolumeServerLeaveRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeServerLeaveRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeServerLeaveRequest) ProtoMessage() {} func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead. 
func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{78} } type VolumeServerLeaveResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *VolumeServerLeaveResponse) Reset() { *x = VolumeServerLeaveResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeServerLeaveResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeServerLeaveResponse) ProtoMessage() {} func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead. func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{79} } // remote storage type FetchAndWriteNeedleRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` Cookie uint32 `protobuf:"varint,3,opt,name=cookie,proto3" json:"cookie,omitempty"` Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` Size int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` Replicas []*FetchAndWriteNeedleRequest_Replica `protobuf:"bytes,6,rep,name=replicas,proto3" json:"replicas,omitempty"` Auth string `protobuf:"bytes,7,opt,name=auth,proto3" json:"auth,omitempty"` // remote conf RemoteConf *remote_pb.RemoteConf `protobuf:"bytes,15,opt,name=remote_conf,json=remoteConf,proto3" json:"remote_conf,omitempty"` RemoteLocation *remote_pb.RemoteStorageLocation `protobuf:"bytes,16,opt,name=remote_location,json=remoteLocation,proto3" json:"remote_location,omitempty"` } func (x *FetchAndWriteNeedleRequest) Reset() { *x = FetchAndWriteNeedleRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *FetchAndWriteNeedleRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*FetchAndWriteNeedleRequest) ProtoMessage() {} func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use FetchAndWriteNeedleRequest.ProtoReflect.Descriptor instead. 
func (*FetchAndWriteNeedleRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{80} } func (x *FetchAndWriteNeedleRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *FetchAndWriteNeedleRequest) GetNeedleId() uint64 { if x != nil { return x.NeedleId } return 0 } func (x *FetchAndWriteNeedleRequest) GetCookie() uint32 { if x != nil { return x.Cookie } return 0 } func (x *FetchAndWriteNeedleRequest) GetOffset() int64 { if x != nil { return x.Offset } return 0 } func (x *FetchAndWriteNeedleRequest) GetSize() int64 { if x != nil { return x.Size } return 0 } func (x *FetchAndWriteNeedleRequest) GetReplicas() []*FetchAndWriteNeedleRequest_Replica { if x != nil { return x.Replicas } return nil } func (x *FetchAndWriteNeedleRequest) GetAuth() string { if x != nil { return x.Auth } return "" } func (x *FetchAndWriteNeedleRequest) GetRemoteConf() *remote_pb.RemoteConf { if x != nil { return x.RemoteConf } return nil } func (x *FetchAndWriteNeedleRequest) GetRemoteLocation() *remote_pb.RemoteStorageLocation { if x != nil { return x.RemoteLocation } return nil } type FetchAndWriteNeedleResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *FetchAndWriteNeedleResponse) Reset() { *x = FetchAndWriteNeedleResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *FetchAndWriteNeedleResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*FetchAndWriteNeedleResponse) ProtoMessage() {} func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use FetchAndWriteNeedleResponse.ProtoReflect.Descriptor instead. 
func (*FetchAndWriteNeedleResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{81} } // select on volume servers type QueryRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Selections []string `protobuf:"bytes,1,rep,name=selections,proto3" json:"selections,omitempty"` FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds,proto3" json:"from_file_ids,omitempty"` Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization,proto3" json:"input_serialization,omitempty"` OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization,proto3" json:"output_serialization,omitempty"` } func (x *QueryRequest) Reset() { *x = QueryRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueryRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueryRequest) ProtoMessage() {} func (x *QueryRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. func (*QueryRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{82} } func (x *QueryRequest) GetSelections() []string { if x != nil { return x.Selections } return nil } func (x *QueryRequest) GetFromFileIds() []string { if x != nil { return x.FromFileIds } return nil } func (x *QueryRequest) GetFilter() *QueryRequest_Filter { if x != nil { return x.Filter } return nil } func (x *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization { if x != nil { return x.InputSerialization } return nil } func (x *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization { if x != nil { return x.OutputSerialization } return nil } type QueriedStripe struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"` } func (x *QueriedStripe) Reset() { *x = QueriedStripe{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueriedStripe) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueriedStripe) ProtoMessage() {} func (x *QueriedStripe) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead. 
func (*QueriedStripe) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{83} } func (x *QueriedStripe) GetRecords() []byte { if x != nil { return x.Records } return nil } type VolumeNeedleStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` } func (x *VolumeNeedleStatusRequest) Reset() { *x = VolumeNeedleStatusRequest{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeNeedleStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeNeedleStatusRequest) ProtoMessage() {} func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead. func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{84} } func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 { if x != nil { return x.VolumeId } return 0 } func (x *VolumeNeedleStatusRequest) GetNeedleId() uint64 { if x != nil { return x.NeedleId } return 0 } type VolumeNeedleStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields NeedleId uint64 `protobuf:"varint,1,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` Cookie uint32 `protobuf:"varint,2,opt,name=cookie,proto3" json:"cookie,omitempty"` Size uint32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` Crc uint32 `protobuf:"varint,5,opt,name=crc,proto3" json:"crc,omitempty"` Ttl string `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` } func (x *VolumeNeedleStatusResponse) Reset() { *x = VolumeNeedleStatusResponse{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *VolumeNeedleStatusResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*VolumeNeedleStatusResponse) ProtoMessage() {} func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead. 
func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{85} } func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 { if x != nil { return x.NeedleId } return 0 } func (x *VolumeNeedleStatusResponse) GetCookie() uint32 { if x != nil { return x.Cookie } return 0 } func (x *VolumeNeedleStatusResponse) GetSize() uint32 { if x != nil { return x.Size } return 0 } func (x *VolumeNeedleStatusResponse) GetLastModified() uint64 { if x != nil { return x.LastModified } return 0 } func (x *VolumeNeedleStatusResponse) GetCrc() uint32 { if x != nil { return x.Crc } return 0 } func (x *VolumeNeedleStatusResponse) GetTtl() string { if x != nil { return x.Ttl } return "" } type FetchAndWriteNeedleRequest_Replica struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` GrpcPort int32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` } func (x *FetchAndWriteNeedleRequest_Replica) Reset() { *x = FetchAndWriteNeedleRequest_Replica{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *FetchAndWriteNeedleRequest_Replica) String() string { return protoimpl.X.MessageStringOf(x) } func (*FetchAndWriteNeedleRequest_Replica) ProtoMessage() {} func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use FetchAndWriteNeedleRequest_Replica.ProtoReflect.Descriptor instead. 
func (*FetchAndWriteNeedleRequest_Replica) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{80, 0} } func (x *FetchAndWriteNeedleRequest_Replica) GetUrl() string { if x != nil { return x.Url } return "" } func (x *FetchAndWriteNeedleRequest_Replica) GetPublicUrl() string { if x != nil { return x.PublicUrl } return "" } func (x *FetchAndWriteNeedleRequest_Replica) GetGrpcPort() int32 { if x != nil { return x.GrpcPort } return 0 } type QueryRequest_Filter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` Operand string `protobuf:"bytes,2,opt,name=operand,proto3" json:"operand,omitempty"` Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } func (x *QueryRequest_Filter) Reset() { *x = QueryRequest_Filter{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueryRequest_Filter) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueryRequest_Filter) ProtoMessage() {} func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead. func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{82, 0} } func (x *QueryRequest_Filter) GetField() string { if x != nil { return x.Field } return "" } func (x *QueryRequest_Filter) GetOperand() string { if x != nil { return x.Operand } return "" } func (x *QueryRequest_Filter) GetValue() string { if x != nil { return x.Value } return "" } type QueryRequest_InputSerialization struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // NONE | GZIP | BZIP2 CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType,proto3" json:"compression_type,omitempty"` CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput,proto3" json:"csv_input,omitempty"` JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput,proto3" json:"json_input,omitempty"` ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput,proto3" json:"parquet_input,omitempty"` } func (x *QueryRequest_InputSerialization) Reset() { *x = QueryRequest_InputSerialization{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueryRequest_InputSerialization) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueryRequest_InputSerialization) ProtoMessage() {} func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead. 
func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{82, 1} } func (x *QueryRequest_InputSerialization) GetCompressionType() string { if x != nil { return x.CompressionType } return "" } func (x *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput { if x != nil { return x.CsvInput } return nil } func (x *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput { if x != nil { return x.JsonInput } return nil } func (x *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput { if x != nil { return x.ParquetInput } return nil } type QueryRequest_OutputSerialization struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput,proto3" json:"csv_output,omitempty"` JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput,proto3" json:"json_output,omitempty"` } func (x *QueryRequest_OutputSerialization) Reset() { *x = QueryRequest_OutputSerialization{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueryRequest_OutputSerialization) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) {
	return file_volume_server_proto_rawDescGZIP(), []int{82, 2}
}

func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput {
	if x != nil {
		return x.CsvOutput
	}
	return nil
}

func (x *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput {
	if x != nil {
		return x.JsonOutput
	}
	return nil
}

type QueryRequest_InputSerialization_CSVInput struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	FileHeaderInfo       string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo,proto3" json:"file_header_info,omitempty"`                   // Valid values: NONE | USE | IGNORE
	RecordDelimiter      string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"`                  // Default: \n
	FieldDelimiter       string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"`                     // Default: ,
	QuoteCharactoer      string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"`                  // Default: "
	QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: "
	Comments             string `protobuf:"bytes,6,opt,name=comments,proto3" json:"comments,omitempty"`                                                       // Default: #
	// If true, records might contain record delimiters within quote characters
	AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter,proto3" json:"allow_quoted_record_delimiter,omitempty"` // Default: false
}

func (x *QueryRequest_InputSerialization_CSVInput) Reset() {
	*x = QueryRequest_InputSerialization_CSVInput{}
	if protoimpl.UnsafeEnabled {
		mi := &file_volume_server_proto_msgTypes[90]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *QueryRequest_InputSerialization_CSVInput) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {}

func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message {
	mi := &file_volume_server_proto_msgTypes[90]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead.
func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{82, 1, 0} } func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { if x != nil { return x.FileHeaderInfo } return "" } func (x *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string { if x != nil { return x.RecordDelimiter } return "" } func (x *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string { if x != nil { return x.FieldDelimiter } return "" } func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string { if x != nil { return x.QuoteCharactoer } return "" } func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string { if x != nil { return x.QuoteEscapeCharacter } return "" } func (x *QueryRequest_InputSerialization_CSVInput) GetComments() string { if x != nil { return x.Comments } return "" } func (x *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter() bool { if x != nil { return x.AllowQuotedRecordDelimiter } return false } type QueryRequest_InputSerialization_JSONInput struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Valid values: DOCUMENT | LINES } func (x *QueryRequest_InputSerialization_JSONInput) Reset() { *x = QueryRequest_InputSerialization_JSONInput{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueryRequest_InputSerialization_JSONInput) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{82, 1, 1} } func (x *QueryRequest_InputSerialization_JSONInput) GetType() string { if x != nil { return x.Type } return "" } type QueryRequest_InputSerialization_ParquetInput struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *QueryRequest_InputSerialization_ParquetInput) Reset() { *x = QueryRequest_InputSerialization_ParquetInput{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueryRequest_InputSerialization_ParquetInput) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[92] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{82, 1, 2} } type QueryRequest_OutputSerialization_CSVOutput struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields,proto3" json:"quote_fields,omitempty"` // Valid values: ALWAYS | ASNEEDED RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: " QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " } func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() { *x = QueryRequest_OutputSerialization_CSVOutput{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueryRequest_OutputSerialization_CSVOutput) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[93] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{82, 2, 0} } func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { if x != nil { return x.QuoteFields } return "" } func (x *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string { if x != nil { return x.RecordDelimiter } return "" } func (x *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string { if x != nil { return x.FieldDelimiter } return "" } func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string { if x != nil { return x.QuoteCharactoer } return "" } func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string { if x != nil { return x.QuoteEscapeCharacter } return "" } type QueryRequest_OutputSerialization_JSONOutput struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` } func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() { *x = QueryRequest_OutputSerialization_JSONOutput{} if protoimpl.UnsafeEnabled { mi := &file_volume_server_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *QueryRequest_OutputSerialization_JSONOutput) String() string { return protoimpl.X.MessageStringOf(x) } func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[94] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { return file_volume_server_proto_rawDescGZIP(), []int{82, 2, 1} } func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { if x != nil { return x.RecordDelimiter } return "" } var File_volume_server_proto protoreflect.FileDescriptor var file_volume_server_proto_rawDesc = []byte{ 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x1a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x0a, 0x18, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x40, 0x0a, 0x19, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x5b, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 
0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x22, 0x46, 0x0a, 0x1b, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x39, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfb, 0x01, 0x0a, 0x15, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 
0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x69, 0x6c, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x61, 0x69, 0x6c, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x56, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x22, 0x42, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x31, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 
0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x17, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x57, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x32, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x14, 0x56, 0x6f, 
0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xcb, 0x01, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x68, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x4e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x94, 0x02, 0x0a, 0x0f, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x78, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x22, 0x5b, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x7d, 0x0a, 0x15, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x39, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x87, 0x01, 0x0a, 0x16, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x19, 0x0a, 0x17, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x15, 0x52, 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 
0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56, 0x69, 0x66, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 
0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x8a, 0x03, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 
0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 
0x63, 0x6b, 0x22, 0xd8, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7c, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, 0x01, 0x0a, 0x20, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 
0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x23, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x22, 0xdc, 0x03, 0x0a, 0x1a, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x50, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x1d, 0x0a, 0x1b, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x0c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 
0x65, 0x49, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x14, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x4e, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xd5, 0x05, 0x0a, 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x63, 0x73, 0x76, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x5a, 0x0a, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 
0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xc8, 0x02, 0x0a, 0x08, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, 0x13, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x63, 0x73, 0x76, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 
0x52, 0x09, 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x5e, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x1a, 0xe3, 0x01, 0x0a, 0x09, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22, 0x29, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x55, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 
0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x72, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x63, 0x72, 0x63, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x32, 0x8c, 0x23, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x2c, 
0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5c, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 
0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x77, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 
0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 
0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x77, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 
0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x34, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 
	0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53,
	0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
	0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53,
	0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c,
	0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f,
	0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52,
	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f,
	0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
	0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
	0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e,
	0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x76,
	0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
	0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65,
	0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c,
	0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x65,
	0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c,
	0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x05, 0x51,
	0x75, 0x65, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
	0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71,
	0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
	0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53,
	0x74, 0x72, 0x69, 0x70, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c,
	0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
	0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
	0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53,
	0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76,
	0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
	0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74,
	0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x39, 0x5a, 0x37,
	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73,
	0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77,
	0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65,
	0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_volume_server_proto_rawDescOnce sync.Once
	file_volume_server_proto_rawDescData = file_volume_server_proto_rawDesc
)

func file_volume_server_proto_rawDescGZIP() []byte {
	file_volume_server_proto_rawDescOnce.Do(func() {
		file_volume_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_volume_server_proto_rawDescData)
	})
	return file_volume_server_proto_rawDescData
}
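// file_volume_server_proto_rawDescGZIP above compresses the raw descriptor
// exactly once (guarded by sync.Once) and memoizes the gzipped bytes, which is
// the form the legacy file-registration APIs consume. As an illustrative,
// hand-written sketch only: the same descriptor can be resolved through the
// global registry once this package is linked in, assuming the standard
// google.golang.org/protobuf runtime and that this file registers under the
// path "volume_server.proto" (both are assumptions, not shown in this excerpt):
//
//	fd, err := protoregistry.GlobalFiles.FindFileByPath("volume_server.proto")
//	if err != nil {
//		panic(err)
//	}
//	md := fd.Messages().ByName("VolumeEcShardReadRequest") // protoreflect lookup
//	for i := 0; i < md.Fields().Len(); i++ {
//		fmt.Println(md.Fields().Get(i).Name()) // volume_id, shard_id, offset, size, file_key
//	}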
file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 95) var file_volume_server_proto_goTypes = []interface{}{ (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse (*DeleteResult)(nil), // 2: volume_server_pb.DeleteResult (*Empty)(nil), // 3: volume_server_pb.Empty (*VacuumVolumeCheckRequest)(nil), // 4: volume_server_pb.VacuumVolumeCheckRequest (*VacuumVolumeCheckResponse)(nil), // 5: volume_server_pb.VacuumVolumeCheckResponse (*VacuumVolumeCompactRequest)(nil), // 6: volume_server_pb.VacuumVolumeCompactRequest (*VacuumVolumeCompactResponse)(nil), // 7: volume_server_pb.VacuumVolumeCompactResponse (*VacuumVolumeCommitRequest)(nil), // 8: volume_server_pb.VacuumVolumeCommitRequest (*VacuumVolumeCommitResponse)(nil), // 9: volume_server_pb.VacuumVolumeCommitResponse (*VacuumVolumeCleanupRequest)(nil), // 10: volume_server_pb.VacuumVolumeCleanupRequest (*VacuumVolumeCleanupResponse)(nil), // 11: volume_server_pb.VacuumVolumeCleanupResponse (*DeleteCollectionRequest)(nil), // 12: volume_server_pb.DeleteCollectionRequest (*DeleteCollectionResponse)(nil), // 13: volume_server_pb.DeleteCollectionResponse (*AllocateVolumeRequest)(nil), // 14: volume_server_pb.AllocateVolumeRequest (*AllocateVolumeResponse)(nil), // 15: volume_server_pb.AllocateVolumeResponse (*VolumeSyncStatusRequest)(nil), // 16: volume_server_pb.VolumeSyncStatusRequest (*VolumeSyncStatusResponse)(nil), // 17: volume_server_pb.VolumeSyncStatusResponse (*VolumeIncrementalCopyRequest)(nil), // 18: volume_server_pb.VolumeIncrementalCopyRequest (*VolumeIncrementalCopyResponse)(nil), // 19: volume_server_pb.VolumeIncrementalCopyResponse (*VolumeMountRequest)(nil), // 20: volume_server_pb.VolumeMountRequest (*VolumeMountResponse)(nil), // 21: volume_server_pb.VolumeMountResponse (*VolumeUnmountRequest)(nil), // 22: volume_server_pb.VolumeUnmountRequest (*VolumeUnmountResponse)(nil), // 23: volume_server_pb.VolumeUnmountResponse (*VolumeDeleteRequest)(nil), // 24: volume_server_pb.VolumeDeleteRequest (*VolumeDeleteResponse)(nil), // 25: volume_server_pb.VolumeDeleteResponse (*VolumeMarkReadonlyRequest)(nil), // 26: volume_server_pb.VolumeMarkReadonlyRequest (*VolumeMarkReadonlyResponse)(nil), // 27: volume_server_pb.VolumeMarkReadonlyResponse (*VolumeMarkWritableRequest)(nil), // 28: volume_server_pb.VolumeMarkWritableRequest (*VolumeMarkWritableResponse)(nil), // 29: volume_server_pb.VolumeMarkWritableResponse (*VolumeConfigureRequest)(nil), // 30: volume_server_pb.VolumeConfigureRequest (*VolumeConfigureResponse)(nil), // 31: volume_server_pb.VolumeConfigureResponse (*VolumeStatusRequest)(nil), // 32: volume_server_pb.VolumeStatusRequest (*VolumeStatusResponse)(nil), // 33: volume_server_pb.VolumeStatusResponse (*VolumeCopyRequest)(nil), // 34: volume_server_pb.VolumeCopyRequest (*VolumeCopyResponse)(nil), // 35: volume_server_pb.VolumeCopyResponse (*CopyFileRequest)(nil), // 36: volume_server_pb.CopyFileRequest (*CopyFileResponse)(nil), // 37: volume_server_pb.CopyFileResponse (*ReadNeedleBlobRequest)(nil), // 38: volume_server_pb.ReadNeedleBlobRequest (*ReadNeedleBlobResponse)(nil), // 39: volume_server_pb.ReadNeedleBlobResponse (*WriteNeedleBlobRequest)(nil), // 40: volume_server_pb.WriteNeedleBlobRequest (*WriteNeedleBlobResponse)(nil), // 41: volume_server_pb.WriteNeedleBlobResponse (*ReadAllNeedlesRequest)(nil), // 42: volume_server_pb.ReadAllNeedlesRequest (*ReadAllNeedlesResponse)(nil), // 43: 
volume_server_pb.ReadAllNeedlesResponse (*VolumeTailSenderRequest)(nil), // 44: volume_server_pb.VolumeTailSenderRequest (*VolumeTailSenderResponse)(nil), // 45: volume_server_pb.VolumeTailSenderResponse (*VolumeTailReceiverRequest)(nil), // 46: volume_server_pb.VolumeTailReceiverRequest (*VolumeTailReceiverResponse)(nil), // 47: volume_server_pb.VolumeTailReceiverResponse (*VolumeEcShardsGenerateRequest)(nil), // 48: volume_server_pb.VolumeEcShardsGenerateRequest (*VolumeEcShardsGenerateResponse)(nil), // 49: volume_server_pb.VolumeEcShardsGenerateResponse (*VolumeEcShardsRebuildRequest)(nil), // 50: volume_server_pb.VolumeEcShardsRebuildRequest (*VolumeEcShardsRebuildResponse)(nil), // 51: volume_server_pb.VolumeEcShardsRebuildResponse (*VolumeEcShardsCopyRequest)(nil), // 52: volume_server_pb.VolumeEcShardsCopyRequest (*VolumeEcShardsCopyResponse)(nil), // 53: volume_server_pb.VolumeEcShardsCopyResponse (*VolumeEcShardsDeleteRequest)(nil), // 54: volume_server_pb.VolumeEcShardsDeleteRequest (*VolumeEcShardsDeleteResponse)(nil), // 55: volume_server_pb.VolumeEcShardsDeleteResponse (*VolumeEcShardsMountRequest)(nil), // 56: volume_server_pb.VolumeEcShardsMountRequest (*VolumeEcShardsMountResponse)(nil), // 57: volume_server_pb.VolumeEcShardsMountResponse (*VolumeEcShardsUnmountRequest)(nil), // 58: volume_server_pb.VolumeEcShardsUnmountRequest (*VolumeEcShardsUnmountResponse)(nil), // 59: volume_server_pb.VolumeEcShardsUnmountResponse (*VolumeEcShardReadRequest)(nil), // 60: volume_server_pb.VolumeEcShardReadRequest (*VolumeEcShardReadResponse)(nil), // 61: volume_server_pb.VolumeEcShardReadResponse (*VolumeEcBlobDeleteRequest)(nil), // 62: volume_server_pb.VolumeEcBlobDeleteRequest (*VolumeEcBlobDeleteResponse)(nil), // 63: volume_server_pb.VolumeEcBlobDeleteResponse (*VolumeEcShardsToVolumeRequest)(nil), // 64: volume_server_pb.VolumeEcShardsToVolumeRequest (*VolumeEcShardsToVolumeResponse)(nil), // 65: volume_server_pb.VolumeEcShardsToVolumeResponse (*ReadVolumeFileStatusRequest)(nil), // 66: volume_server_pb.ReadVolumeFileStatusRequest (*ReadVolumeFileStatusResponse)(nil), // 67: volume_server_pb.ReadVolumeFileStatusResponse (*DiskStatus)(nil), // 68: volume_server_pb.DiskStatus (*MemStatus)(nil), // 69: volume_server_pb.MemStatus (*RemoteFile)(nil), // 70: volume_server_pb.RemoteFile (*VolumeInfo)(nil), // 71: volume_server_pb.VolumeInfo (*VolumeTierMoveDatToRemoteRequest)(nil), // 72: volume_server_pb.VolumeTierMoveDatToRemoteRequest (*VolumeTierMoveDatToRemoteResponse)(nil), // 73: volume_server_pb.VolumeTierMoveDatToRemoteResponse (*VolumeTierMoveDatFromRemoteRequest)(nil), // 74: volume_server_pb.VolumeTierMoveDatFromRemoteRequest (*VolumeTierMoveDatFromRemoteResponse)(nil), // 75: volume_server_pb.VolumeTierMoveDatFromRemoteResponse (*VolumeServerStatusRequest)(nil), // 76: volume_server_pb.VolumeServerStatusRequest (*VolumeServerStatusResponse)(nil), // 77: volume_server_pb.VolumeServerStatusResponse (*VolumeServerLeaveRequest)(nil), // 78: volume_server_pb.VolumeServerLeaveRequest (*VolumeServerLeaveResponse)(nil), // 79: volume_server_pb.VolumeServerLeaveResponse (*FetchAndWriteNeedleRequest)(nil), // 80: volume_server_pb.FetchAndWriteNeedleRequest (*FetchAndWriteNeedleResponse)(nil), // 81: volume_server_pb.FetchAndWriteNeedleResponse (*QueryRequest)(nil), // 82: volume_server_pb.QueryRequest (*QueriedStripe)(nil), // 83: volume_server_pb.QueriedStripe (*VolumeNeedleStatusRequest)(nil), // 84: volume_server_pb.VolumeNeedleStatusRequest (*VolumeNeedleStatusResponse)(nil), // 
85: volume_server_pb.VolumeNeedleStatusResponse (*FetchAndWriteNeedleRequest_Replica)(nil), // 86: volume_server_pb.FetchAndWriteNeedleRequest.Replica (*QueryRequest_Filter)(nil), // 87: volume_server_pb.QueryRequest.Filter (*QueryRequest_InputSerialization)(nil), // 88: volume_server_pb.QueryRequest.InputSerialization (*QueryRequest_OutputSerialization)(nil), // 89: volume_server_pb.QueryRequest.OutputSerialization (*QueryRequest_InputSerialization_CSVInput)(nil), // 90: volume_server_pb.QueryRequest.InputSerialization.CSVInput (*QueryRequest_InputSerialization_JSONInput)(nil), // 91: volume_server_pb.QueryRequest.InputSerialization.JSONInput (*QueryRequest_InputSerialization_ParquetInput)(nil), // 92: volume_server_pb.QueryRequest.InputSerialization.ParquetInput (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 93: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 94: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput (*remote_pb.RemoteConf)(nil), // 95: remote_pb.RemoteConf (*remote_pb.RemoteStorageLocation)(nil), // 96: remote_pb.RemoteStorageLocation } var file_volume_server_proto_depIdxs = []int32{ 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult 70, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile 68, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus 69, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus 86, // 4: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica 95, // 5: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf 96, // 6: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation 87, // 7: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter 88, // 8: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization 89, // 9: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization 90, // 10: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput 91, // 11: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput 92, // 12: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput 93, // 13: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput 94, // 14: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput 0, // 15: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest 4, // 16: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest 6, // 17: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest 8, // 18: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest 10, // 19: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> 
volume_server_pb.VacuumVolumeCleanupRequest 12, // 20: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest 14, // 21: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest 16, // 22: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest 18, // 23: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest 20, // 24: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest 22, // 25: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest 24, // 26: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest 26, // 27: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest 28, // 28: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest 30, // 29: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest 32, // 30: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest 34, // 31: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest 66, // 32: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest 36, // 33: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest 38, // 34: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest 40, // 35: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest 42, // 36: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest 44, // 37: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest 46, // 38: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest 48, // 39: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest 50, // 40: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest 52, // 41: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest 54, // 42: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest 56, // 43: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest 58, // 44: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest 60, // 45: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest 62, // 46: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest 64, // 47: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest 72, // 48: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest 74, // 49: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest 76, // 50: 
volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest 78, // 51: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest 80, // 52: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest 82, // 53: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest 84, // 54: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest 1, // 55: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse 5, // 56: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse 7, // 57: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse 9, // 58: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse 11, // 59: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse 13, // 60: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse 15, // 61: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse 17, // 62: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse 19, // 63: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse 21, // 64: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse 23, // 65: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse 25, // 66: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse 27, // 67: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse 29, // 68: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse 31, // 69: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse 33, // 70: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse 35, // 71: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse 67, // 72: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse 37, // 73: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse 39, // 74: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse 41, // 75: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse 43, // 76: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse 45, // 77: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse 47, // 78: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse 49, // 79: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse 51, // 80: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse 53, // 81: 
volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse 55, // 82: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse 57, // 83: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse 59, // 84: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse 61, // 85: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse 63, // 86: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse 65, // 87: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse 73, // 88: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse 75, // 89: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse 77, // 90: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse 79, // 91: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse 81, // 92: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse 83, // 93: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe 85, // 94: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse 55, // [55:95] is the sub-list for method output_type 15, // [15:55] is the sub-list for method input_type 15, // [15:15] is the sub-list for extension type_name 15, // [15:15] is the sub-list for extension extendee 0, // [0:15] is the sub-list for field type_name } func init() { file_volume_server_proto_init() } func file_volume_server_proto_init() { if File_volume_server_proto != nil { return } if !protoimpl.UnsafeEnabled { file_volume_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BatchDeleteRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BatchDeleteResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeleteResult); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Empty); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VacuumVolumeCheckRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VacuumVolumeCheckResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
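// Each Exporter assigned here and below returns a pointer to one of the message's
// unexported fields (state, sizeCache, or unknownFields) so the protoimpl runtime
// can manage the message via reflection when protoimpl.UnsafeEnabled is false.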
file_volume_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VacuumVolumeCompactRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VacuumVolumeCompactResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VacuumVolumeCommitRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VacuumVolumeCommitResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VacuumVolumeCleanupRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VacuumVolumeCleanupResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeleteCollectionRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeleteCollectionResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AllocateVolumeRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AllocateVolumeResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeSyncStatusRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeSyncStatusResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeIncrementalCopyRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeIncrementalCopyResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*VolumeMountRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeMountResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeUnmountRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeUnmountResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeDeleteRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeDeleteResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeMarkReadonlyRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeMarkReadonlyResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeMarkWritableRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeMarkWritableResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeConfigureRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeConfigureResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeStatusRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeStatusResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeCopyRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
file_volume_server_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeCopyResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CopyFileRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CopyFileResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadNeedleBlobRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadNeedleBlobResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WriteNeedleBlobRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WriteNeedleBlobResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadAllNeedlesRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadAllNeedlesResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeTailSenderRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeTailSenderResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeTailReceiverRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeTailReceiverResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsGenerateRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsGenerateResponse); i { case 0: 
return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsRebuildRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsRebuildResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsCopyRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsCopyResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsDeleteRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsDeleteResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsMountRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsMountResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsUnmountRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsUnmountResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardReadRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardReadResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcBlobDeleteRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcBlobDeleteResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: 
return nil } } file_volume_server_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsToVolumeRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeEcShardsToVolumeResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadVolumeFileStatusRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadVolumeFileStatusResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DiskStatus); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MemStatus); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoteFile); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeInfo); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeTierMoveDatToRemoteRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeTierMoveDatToRemoteResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeServerStatusRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeServerStatusResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*VolumeServerLeaveRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeServerLeaveResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FetchAndWriteNeedleRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FetchAndWriteNeedleResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueriedStripe); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeNeedleStatusRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VolumeNeedleStatusResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FetchAndWriteNeedleRequest_Replica); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_Filter); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_InputSerialization); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_OutputSerialization); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_InputSerialization_CSVInput); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_InputSerialization_JSONInput); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i { case 0: return &v.state case 
1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_volume_server_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QueryRequest_OutputSerialization_JSONOutput); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_volume_server_proto_rawDesc, NumEnums: 0, NumMessages: 95, NumExtensions: 0, NumServices: 1, }, GoTypes: file_volume_server_proto_goTypes, DependencyIndexes: file_volume_server_proto_depIdxs, MessageInfos: file_volume_server_proto_msgTypes, }.Build() File_volume_server_proto = out.File file_volume_server_proto_rawDesc = nil file_volume_server_proto_goTypes = nil file_volume_server_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // VolumeServerClient is the client API for VolumeServer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type VolumeServerClient interface { //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeCopyClient, error) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (VolumeServer_ReadAllNeedlesClient, error) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) // erasure coding VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) 
VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) // tiered storage VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) // remote storage FetchAndWriteNeedle(ctx context.Context, in *FetchAndWriteNeedleRequest, opts ...grpc.CallOption) (*FetchAndWriteNeedleResponse, error) // <experimental> query Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) } type volumeServerClient struct { cc grpc.ClientConnInterface } func NewVolumeServerClient(cc grpc.ClientConnInterface) VolumeServerClient { return &volumeServerClient{cc} } func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) { out := new(BatchDeleteResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) { out := new(VacuumVolumeCheckResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[0], "/volume_server_pb.VolumeServer/VacuumVolumeCompact", opts...) 
if err != nil { return nil, err } x := &volumeServerVacuumVolumeCompactClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_VacuumVolumeCompactClient interface { Recv() (*VacuumVolumeCompactResponse, error) grpc.ClientStream } type volumeServerVacuumVolumeCompactClient struct { grpc.ClientStream } func (x *volumeServerVacuumVolumeCompactClient) Recv() (*VacuumVolumeCompactResponse, error) { m := new(VacuumVolumeCompactResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) { out := new(VacuumVolumeCommitResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) { out := new(VacuumVolumeCleanupResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { out := new(DeleteCollectionResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) { out := new(AllocateVolumeResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) { out := new(VolumeSyncStatusResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[1], "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) 
if err != nil { return nil, err } x := &volumeServerVolumeIncrementalCopyClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_VolumeIncrementalCopyClient interface { Recv() (*VolumeIncrementalCopyResponse, error) grpc.ClientStream } type volumeServerVolumeIncrementalCopyClient struct { grpc.ClientStream } func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopyResponse, error) { m := new(VolumeIncrementalCopyResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) { out := new(VolumeMountResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) { out := new(VolumeUnmountResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) { out := new(VolumeDeleteResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { out := new(VolumeMarkReadonlyResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) { out := new(VolumeMarkWritableResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkWritable", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { out := new(VolumeConfigureResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) { out := new(VolumeStatusResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeCopyClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeCopy", opts...) 
if err != nil { return nil, err } x := &volumeServerVolumeCopyClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_VolumeCopyClient interface { Recv() (*VolumeCopyResponse, error) grpc.ClientStream } type volumeServerVolumeCopyClient struct { grpc.ClientStream } func (x *volumeServerVolumeCopyClient) Recv() (*VolumeCopyResponse, error) { m := new(VolumeCopyResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) { out := new(ReadVolumeFileStatusResponse)
err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, opts...) if err != nil { return nil, err }
return out, nil } func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[3], "/volume_server_pb.VolumeServer/CopyFile", opts...) if err != nil { return nil, err } x := &volumeServerCopyFileClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_CopyFileClient interface { Recv() (*CopyFileResponse, error) grpc.ClientStream } type volumeServerCopyFileClient struct { grpc.ClientStream } func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { m := new(CopyFileResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) { out := new(ReadNeedleBlobResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleBlob", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) { out := new(WriteNeedleBlobResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/WriteNeedleBlob", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (VolumeServer_ReadAllNeedlesClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[4], "/volume_server_pb.VolumeServer/ReadAllNeedles", opts...) if err != nil { return nil, err } x := &volumeServerReadAllNeedlesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_ReadAllNeedlesClient interface { Recv() (*ReadAllNeedlesResponse, error) grpc.ClientStream } type volumeServerReadAllNeedlesClient struct { grpc.ClientStream } func (x *volumeServerReadAllNeedlesClient) Recv() (*ReadAllNeedlesResponse, error) { m := new(ReadAllNeedlesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[5], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) 
if err != nil { return nil, err } x := &volumeServerVolumeTailSenderClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_VolumeTailSenderClient interface { Recv() (*VolumeTailSenderResponse, error) grpc.ClientStream } type volumeServerVolumeTailSenderClient struct { grpc.ClientStream } func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse, error) { m := new(VolumeTailSenderResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) { out := new(VolumeTailReceiverResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) { out := new(VolumeEcShardsGenerateResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) { out := new(VolumeEcShardsRebuildResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) { out := new(VolumeEcShardsCopyResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) { out := new(VolumeEcShardsDeleteResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) { out := new(VolumeEcShardsMountResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) { out := new(VolumeEcShardsUnmountResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[6], "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) 
if err != nil { return nil, err } x := &volumeServerVolumeEcShardReadClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_VolumeEcShardReadClient interface { Recv() (*VolumeEcShardReadResponse, error) grpc.ClientStream } type volumeServerVolumeEcShardReadClient struct { grpc.ClientStream } func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse, error) { m := new(VolumeEcShardReadResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) { out := new(VolumeEcBlobDeleteResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) { out := new(VolumeEcShardsToVolumeResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[7], "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) if err != nil { return nil, err } x := &volumeServerVolumeTierMoveDatToRemoteClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_VolumeTierMoveDatToRemoteClient interface { Recv() (*VolumeTierMoveDatToRemoteResponse, error) grpc.ClientStream } type volumeServerVolumeTierMoveDatToRemoteClient struct { grpc.ClientStream } func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDatToRemoteResponse, error) { m := new(VolumeTierMoveDatToRemoteResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[8], "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) 
if err != nil { return nil, err } x := &volumeServerVolumeTierMoveDatFromRemoteClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_VolumeTierMoveDatFromRemoteClient interface { Recv() (*VolumeTierMoveDatFromRemoteResponse, error) grpc.ClientStream } type volumeServerVolumeTierMoveDatFromRemoteClient struct { grpc.ClientStream } func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveDatFromRemoteResponse, error) { m := new(VolumeTierMoveDatFromRemoteResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) { out := new(VolumeServerStatusResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) { out := new(VolumeServerLeaveResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerLeave", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) FetchAndWriteNeedle(ctx context.Context, in *FetchAndWriteNeedleRequest, opts ...grpc.CallOption) (*FetchAndWriteNeedleResponse, error) { out := new(FetchAndWriteNeedleResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/FetchAndWriteNeedle", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[9], "/volume_server_pb.VolumeServer/Query", opts...) if err != nil { return nil, err } x := &volumeServerQueryClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type VolumeServer_QueryClient interface { Recv() (*QueriedStripe, error) grpc.ClientStream } type volumeServerQueryClient struct { grpc.ClientStream } func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) { m := new(QueriedStripe) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *volumeServerClient) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) { out := new(VolumeNeedleStatusResponse) err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeNeedleStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } // VolumeServerServer is the server API for VolumeServer service. type VolumeServerServer interface { //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error) VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(*VolumeCopyRequest, VolumeServer_VolumeCopyServer) error ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) ReadAllNeedles(*ReadAllNeedlesRequest, VolumeServer_ReadAllNeedlesServer) error VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) // erasure coding VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) // tiered storage VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) 
VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error) // remote storage FetchAndWriteNeedle(context.Context, *FetchAndWriteNeedleRequest) (*FetchAndWriteNeedleResponse, error) // <experimental> query Query(*QueryRequest, VolumeServer_QueryServer) error VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) } // UnimplementedVolumeServerServer can be embedded to have forward compatible implementations. type UnimplementedVolumeServerServer struct { } func (*UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BatchDelete not implemented") } func (*UnimplementedVolumeServerServer) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCheck not implemented") } func (*UnimplementedVolumeServerServer) VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error { return status.Errorf(codes.Unimplemented, "method VacuumVolumeCompact not implemented") } func (*UnimplementedVolumeServerServer) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCommit not implemented") } func (*UnimplementedVolumeServerServer) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCleanup not implemented") } func (*UnimplementedVolumeServerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented") } func (*UnimplementedVolumeServerServer) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method AllocateVolume not implemented") } func (*UnimplementedVolumeServerServer) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeSyncStatus not implemented") } func (*UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error { return status.Errorf(codes.Unimplemented, "method VolumeIncrementalCopy not implemented") } func (*UnimplementedVolumeServerServer) VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeMount not implemented") } func (*UnimplementedVolumeServerServer) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeUnmount not implemented") } func (*UnimplementedVolumeServerServer) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeDelete not implemented") } func (*UnimplementedVolumeServerServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented") } func (*UnimplementedVolumeServerServer) 
VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkWritable not implemented") } func (*UnimplementedVolumeServerServer) VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeConfigure not implemented") } func (*UnimplementedVolumeServerServer) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeStatus not implemented") } func (*UnimplementedVolumeServerServer) VolumeCopy(*VolumeCopyRequest, VolumeServer_VolumeCopyServer) error { return status.Errorf(codes.Unimplemented, "method VolumeCopy not implemented") } func (*UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadVolumeFileStatus not implemented") } func (*UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error { return status.Errorf(codes.Unimplemented, "method CopyFile not implemented") } func (*UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented") } func (*UnimplementedVolumeServerServer) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method WriteNeedleBlob not implemented") } func (*UnimplementedVolumeServerServer) ReadAllNeedles(*ReadAllNeedlesRequest, VolumeServer_ReadAllNeedlesServer) error { return status.Errorf(codes.Unimplemented, "method ReadAllNeedles not implemented") } func (*UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented") } func (*UnimplementedVolumeServerServer) VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeTailReceiver not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsGenerate not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsRebuild not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsCopy not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsDelete not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method 
VolumeEcShardsMount not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsUnmount not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error { return status.Errorf(codes.Unimplemented, "method VolumeEcShardRead not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcBlobDelete not implemented") } func (*UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsToVolume not implemented") } func (*UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented") } func (*UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatFromRemote not implemented") } func (*UnimplementedVolumeServerServer) VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeServerStatus not implemented") } func (*UnimplementedVolumeServerServer) VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeServerLeave not implemented") } func (*UnimplementedVolumeServerServer) FetchAndWriteNeedle(context.Context, *FetchAndWriteNeedleRequest) (*FetchAndWriteNeedleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FetchAndWriteNeedle not implemented") } func (*UnimplementedVolumeServerServer) Query(*QueryRequest, VolumeServer_QueryServer) error { return status.Errorf(codes.Unimplemented, "method Query not implemented") } func (*UnimplementedVolumeServerServer) VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeNeedleStatus not implemented") } func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) { s.RegisterService(&_VolumeServer_serviceDesc, srv) } func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(BatchDeleteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).BatchDelete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/BatchDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).BatchDelete(ctx, req.(*BatchDeleteRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { in := new(VacuumVolumeCheckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCheck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, req.(*VacuumVolumeCheckRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VacuumVolumeCompact_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VacuumVolumeCompactRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).VacuumVolumeCompact(m, &volumeServerVacuumVolumeCompactServer{stream}) } type VolumeServer_VacuumVolumeCompactServer interface { Send(*VacuumVolumeCompactResponse) error grpc.ServerStream } type volumeServerVacuumVolumeCompactServer struct { grpc.ServerStream } func (x *volumeServerVacuumVolumeCompactServer) Send(m *VacuumVolumeCompactResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VacuumVolumeCommitRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCommit", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, req.(*VacuumVolumeCommitRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VacuumVolumeCleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VacuumVolumeCleanupRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, req.(*VacuumVolumeCleanupRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteCollectionRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).DeleteCollection(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/DeleteCollection", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_AllocateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AllocateVolumeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).AllocateVolume(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: 
"/volume_server_pb.VolumeServer/AllocateVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).AllocateVolume(ctx, req.(*AllocateVolumeRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeSyncStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeSyncStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeSyncStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeSyncStatus(ctx, req.(*VolumeSyncStatusRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeIncrementalCopy_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeIncrementalCopyRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).VolumeIncrementalCopy(m, &volumeServerVolumeIncrementalCopyServer{stream}) } type VolumeServer_VolumeIncrementalCopyServer interface { Send(*VolumeIncrementalCopyResponse) error grpc.ServerStream } type volumeServerVolumeIncrementalCopyServer struct { grpc.ServerStream } func (x *volumeServerVolumeIncrementalCopyServer) Send(m *VolumeIncrementalCopyResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeMountRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeMount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeMount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMount(ctx, req.(*VolumeMountRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeUnmountRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeUnmount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeUnmount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeUnmount(ctx, req.(*VolumeUnmountRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeDeleteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeDelete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeDelete(ctx, req.(*VolumeDeleteRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeMarkReadonlyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkReadonly", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, req.(*VolumeMarkReadonlyRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeMarkWritable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeMarkWritableRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeMarkWritable(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkWritable", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMarkWritable(ctx, req.(*VolumeMarkWritableRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeConfigureRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeConfigure(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeStatus(ctx, req.(*VolumeStatusRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeCopy_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeCopyRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).VolumeCopy(m, &volumeServerVolumeCopyServer{stream}) } type VolumeServer_VolumeCopyServer interface { Send(*VolumeCopyResponse) error grpc.ServerStream } type volumeServerVolumeCopyServer struct { grpc.ServerStream } func (x *volumeServerVolumeCopyServer) Send(m *VolumeCopyResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadVolumeFileStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: 
"/volume_server_pb.VolumeServer/ReadVolumeFileStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, req.(*ReadVolumeFileStatusRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(CopyFileRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).CopyFile(m, &volumeServerCopyFileServer{stream}) } type VolumeServer_CopyFileServer interface { Send(*CopyFileResponse) error grpc.ServerStream } type volumeServerCopyFileServer struct { grpc.ServerStream } func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadNeedleBlobRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).ReadNeedleBlob(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleBlob", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).ReadNeedleBlob(ctx, req.(*ReadNeedleBlobRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(WriteNeedleBlobRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).WriteNeedleBlob(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/WriteNeedleBlob", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).WriteNeedleBlob(ctx, req.(*WriteNeedleBlobRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_ReadAllNeedles_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ReadAllNeedlesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).ReadAllNeedles(m, &volumeServerReadAllNeedlesServer{stream}) } type VolumeServer_ReadAllNeedlesServer interface { Send(*ReadAllNeedlesResponse) error grpc.ServerStream } type volumeServerReadAllNeedlesServer struct { grpc.ServerStream } func (x *volumeServerReadAllNeedlesServer) Send(m *ReadAllNeedlesResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTailSenderRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).VolumeTailSender(m, &volumeServerVolumeTailSenderServer{stream}) } type VolumeServer_VolumeTailSenderServer interface { Send(*VolumeTailSenderResponse) error grpc.ServerStream } type volumeServerVolumeTailSenderServer struct { grpc.ServerStream } func (x *volumeServerVolumeTailSenderServer) Send(m *VolumeTailSenderResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_VolumeTailReceiver_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeTailReceiverRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return 
srv.(VolumeServerServer).VolumeTailReceiver(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeTailReceiver", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeTailReceiver(ctx, req.(*VolumeTailReceiverRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeEcShardsGenerate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcShardsGenerateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, req.(*VolumeEcShardsGenerateRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeEcShardsRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcShardsRebuildRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, req.(*VolumeEcShardsRebuildRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeEcShardsCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcShardsCopyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, req.(*VolumeEcShardsCopyRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeEcShardsDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcShardsDeleteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, req.(*VolumeEcShardsDeleteRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeEcShardsMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcShardsMountRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: 
"/volume_server_pb.VolumeServer/VolumeEcShardsMount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, req.(*VolumeEcShardsMountRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeEcShardsUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcShardsUnmountRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, req.(*VolumeEcShardsUnmountRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeEcShardRead_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeEcShardReadRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).VolumeEcShardRead(m, &volumeServerVolumeEcShardReadServer{stream}) } type VolumeServer_VolumeEcShardReadServer interface { Send(*VolumeEcShardReadResponse) error grpc.ServerStream } type volumeServerVolumeEcShardReadServer struct { grpc.ServerStream } func (x *volumeServerVolumeEcShardReadServer) Send(m *VolumeEcShardReadResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcBlobDeleteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, req.(*VolumeEcBlobDeleteRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcShardsToVolumeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, req.(*VolumeEcShardsToVolumeRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeTierMoveDatToRemote_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTierMoveDatToRemoteRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &volumeServerVolumeTierMoveDatToRemoteServer{stream}) } type VolumeServer_VolumeTierMoveDatToRemoteServer interface { Send(*VolumeTierMoveDatToRemoteResponse) error grpc.ServerStream } type volumeServerVolumeTierMoveDatToRemoteServer struct { grpc.ServerStream } func (x *volumeServerVolumeTierMoveDatToRemoteServer) Send(m 
*VolumeTierMoveDatToRemoteResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_VolumeTierMoveDatFromRemote_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTierMoveDatFromRemoteRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &volumeServerVolumeTierMoveDatFromRemoteServer{stream}) } type VolumeServer_VolumeTierMoveDatFromRemoteServer interface { Send(*VolumeTierMoveDatFromRemoteResponse) error grpc.ServerStream } type volumeServerVolumeTierMoveDatFromRemoteServer struct { grpc.ServerStream } func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDatFromRemoteResponse) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeServerStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeServerStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_VolumeServerLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeServerLeaveRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).VolumeServerLeave(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeServerLeave", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeServerLeave(ctx, req.(*VolumeServerLeaveRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_FetchAndWriteNeedle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FetchAndWriteNeedleRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(VolumeServerServer).FetchAndWriteNeedle(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/FetchAndWriteNeedle", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).FetchAndWriteNeedle(ctx, req.(*FetchAndWriteNeedleRequest)) } return interceptor(ctx, in, info, handler) } func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(QueryRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(VolumeServerServer).Query(m, &volumeServerQueryServer{stream}) } type VolumeServer_QueryServer interface { Send(*QueriedStripe) error grpc.ServerStream } type volumeServerQueryServer struct { grpc.ServerStream } func (x *volumeServerQueryServer) Send(m *QueriedStripe) error { return x.ServerStream.SendMsg(m) } func _VolumeServer_VolumeNeedleStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeNeedleStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor 
== nil { return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/volume_server_pb.VolumeServer/VolumeNeedleStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, req.(*VolumeNeedleStatusRequest)) } return interceptor(ctx, in, info, handler) } var _VolumeServer_serviceDesc = grpc.ServiceDesc{ ServiceName: "volume_server_pb.VolumeServer", HandlerType: (*VolumeServerServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "BatchDelete", Handler: _VolumeServer_BatchDelete_Handler, }, { MethodName: "VacuumVolumeCheck", Handler: _VolumeServer_VacuumVolumeCheck_Handler, }, { MethodName: "VacuumVolumeCommit", Handler: _VolumeServer_VacuumVolumeCommit_Handler, }, { MethodName: "VacuumVolumeCleanup", Handler: _VolumeServer_VacuumVolumeCleanup_Handler, }, { MethodName: "DeleteCollection", Handler: _VolumeServer_DeleteCollection_Handler, }, { MethodName: "AllocateVolume", Handler: _VolumeServer_AllocateVolume_Handler, }, { MethodName: "VolumeSyncStatus", Handler: _VolumeServer_VolumeSyncStatus_Handler, }, { MethodName: "VolumeMount", Handler: _VolumeServer_VolumeMount_Handler, }, { MethodName: "VolumeUnmount", Handler: _VolumeServer_VolumeUnmount_Handler, }, { MethodName: "VolumeDelete", Handler: _VolumeServer_VolumeDelete_Handler, }, { MethodName: "VolumeMarkReadonly", Handler: _VolumeServer_VolumeMarkReadonly_Handler, }, { MethodName: "VolumeMarkWritable", Handler: _VolumeServer_VolumeMarkWritable_Handler, }, { MethodName: "VolumeConfigure", Handler: _VolumeServer_VolumeConfigure_Handler, }, { MethodName: "VolumeStatus", Handler: _VolumeServer_VolumeStatus_Handler, }, { MethodName: "ReadVolumeFileStatus", Handler: _VolumeServer_ReadVolumeFileStatus_Handler, }, { MethodName: "ReadNeedleBlob", Handler: _VolumeServer_ReadNeedleBlob_Handler, }, { MethodName: "WriteNeedleBlob", Handler: _VolumeServer_WriteNeedleBlob_Handler, }, { MethodName: "VolumeTailReceiver", Handler: _VolumeServer_VolumeTailReceiver_Handler, }, { MethodName: "VolumeEcShardsGenerate", Handler: _VolumeServer_VolumeEcShardsGenerate_Handler, }, { MethodName: "VolumeEcShardsRebuild", Handler: _VolumeServer_VolumeEcShardsRebuild_Handler, }, { MethodName: "VolumeEcShardsCopy", Handler: _VolumeServer_VolumeEcShardsCopy_Handler, }, { MethodName: "VolumeEcShardsDelete", Handler: _VolumeServer_VolumeEcShardsDelete_Handler, }, { MethodName: "VolumeEcShardsMount", Handler: _VolumeServer_VolumeEcShardsMount_Handler, }, { MethodName: "VolumeEcShardsUnmount", Handler: _VolumeServer_VolumeEcShardsUnmount_Handler, }, { MethodName: "VolumeEcBlobDelete", Handler: _VolumeServer_VolumeEcBlobDelete_Handler, }, { MethodName: "VolumeEcShardsToVolume", Handler: _VolumeServer_VolumeEcShardsToVolume_Handler, }, { MethodName: "VolumeServerStatus", Handler: _VolumeServer_VolumeServerStatus_Handler, }, { MethodName: "VolumeServerLeave", Handler: _VolumeServer_VolumeServerLeave_Handler, }, { MethodName: "FetchAndWriteNeedle", Handler: _VolumeServer_FetchAndWriteNeedle_Handler, }, { MethodName: "VolumeNeedleStatus", Handler: _VolumeServer_VolumeNeedleStatus_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "VacuumVolumeCompact", Handler: _VolumeServer_VacuumVolumeCompact_Handler, ServerStreams: true, }, { StreamName: "VolumeIncrementalCopy", Handler: _VolumeServer_VolumeIncrementalCopy_Handler, ServerStreams: true, }, { StreamName: "VolumeCopy", Handler: _VolumeServer_VolumeCopy_Handler, ServerStreams: true, 
}, { StreamName: "CopyFile", Handler: _VolumeServer_CopyFile_Handler, ServerStreams: true, }, { StreamName: "ReadAllNeedles", Handler: _VolumeServer_ReadAllNeedles_Handler, ServerStreams: true, }, { StreamName: "VolumeTailSender", Handler: _VolumeServer_VolumeTailSender_Handler, ServerStreams: true, }, { StreamName: "VolumeEcShardRead", Handler: _VolumeServer_VolumeEcShardRead_Handler, ServerStreams: true, }, { StreamName: "VolumeTierMoveDatToRemote", Handler: _VolumeServer_VolumeTierMoveDatToRemote_Handler, ServerStreams: true, }, { StreamName: "VolumeTierMoveDatFromRemote", Handler: _VolumeServer_VolumeTierMoveDatFromRemote_Handler, ServerStreams: true, }, { StreamName: "Query", Handler: _VolumeServer_Query_Handler, ServerStreams: true, }, }, Metadata: "volume_server.proto", }
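// Editor-added usage sketch, not generated code: the generated surface above
// pairs a plain client (unary Invoke calls) with per-RPC stream wrappers whose
// Recv loops end at io.EOF. The sketch below shows one plausible caller. It
// assumes the conventional protoc-gen-go constructor NewVolumeServerClient
// (generated earlier in this file), an "io" import, and zero-valued requests,
// since the message field layouts are not shown here.
//
//	func exampleVolumeServerClient(ctx context.Context, cc *grpc.ClientConn) error {
//		client := NewVolumeServerClient(cc)
//
//		// Unary RPC: one request, one response.
//		if _, err := client.VolumeStatus(ctx, &VolumeStatusRequest{}); err != nil {
//			return err
//		}
//
//		// Server-streaming RPC: one request, then Recv until io.EOF.
//		stream, err := client.VolumeCopy(ctx, &VolumeCopyRequest{})
//		if err != nil {
//			return err
//		}
//		for {
//			resp, err := stream.Recv()
//			if err == io.EOF {
//				return nil // server finished the stream normally
//			}
//			if err != nil {
//				return err
//			}
//			_ = resp // handle each VolumeCopyResponse chunk
//		}
//	}
//
// On the server side, embedding UnimplementedVolumeServerServer (declared
// above) keeps an implementation forward compatible, so only the methods a
// server actually supports need overriding before registration:
//
//	type exampleVolumeServer struct {
//		UnimplementedVolumeServerServer
//	}
//
//	func serveExample(lis net.Listener) error { // "net" import assumed
//		s := grpc.NewServer()
//		RegisterVolumeServerServer(s, &exampleVolumeServer{})
//		return s.Serve(lis)
//	}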
job.js
/** * General Notes: * None */ var humanInterval = require('human-interval'), CronTime = require('cron').CronTime, date = require('date.js'), moment = require('moment-timezone'), debug = require('debug')('agenda:job'); var Job = module.exports = function Job(args) { args = args || {}; // Remove special args this.agenda = args.agenda; delete args.agenda; // Process args args.priority = parsePriority(args.priority) || 0; // Set attrs to args var attrs = {}; for (var key in args) { if (args.hasOwnProperty(key)) { attrs[key] = args[key]; } } // Set defaults if undefined // NOTE: What is the difference between 'once' here and 'single' in agenda.js? attrs.nextRunAt = attrs.nextRunAt || new Date(); attrs.type = attrs.type || 'once'; this.attrs = attrs; }; /** * Given a job, turn it into an object we can store in Mongo * @returns {Object} json object from Job */ Job.prototype.toJSON = function() { var self = this, attrs = self.attrs || {}; var result = {}; for (var prop in attrs) { if (attrs.hasOwnProperty(prop)) { result[prop] = attrs[prop]; } } var dates = ['lastRunAt', 'lastFinishedAt', 'nextRunAt', 'failedAt', 'lockedAt']; dates.forEach(function(d) { if (result[d]) { result[d] = new Date(result[d]); } }); return result; }; /** * Internal method used to compute next time a job should run and sets the proper values * @returns {exports} instance of Job instance */ Job.prototype.computeNextRunAt = function() { var interval = this.attrs.repeatInterval; var timezone = this.attrs.repeatTimezone; var repeatAt = this.attrs.repeatAt; this.attrs.nextRunAt = undefined; if (interval) { computeFromInterval.call(this); } else if (repeatAt) { computeFromRepeatAt.call(this); } return this; function dateForTimezone(d) { d = moment(d); if (timezone) { d.tz(timezone); } return d; } /** * Internal method that computes the interval * @returns {undefined} */ function
computeFromInterval
() { debug('[%s:%s] computing next run via interval [%s]', this.attrs.name, this.attrs._id, interval); var lastRun = this.attrs.lastRunAt || new Date(); lastRun = dateForTimezone(lastRun); try { var cronTime = new CronTime(interval); var nextDate = cronTime._getNextDateFrom(lastRun); if (nextDate.valueOf() === lastRun.valueOf()) { // Handle cronTime giving back the same date for the next run time nextDate = cronTime._getNextDateFrom(dateForTimezone(new Date(lastRun.valueOf() + 1000))); } this.attrs.nextRunAt = nextDate; debug('[%s:%s] nextRunAt set to [%s]', this.attrs.name, this.attrs._id, this.attrs.nextRunAt.toISOString()); } catch (e) { // Nope, humanInterval then! try { if (!this.attrs.lastRunAt && humanInterval(interval)) { this.attrs.nextRunAt = lastRun.valueOf(); debug('[%s:%s] nextRunAt set to [%s]', this.attrs.name, this.attrs._id, this.attrs.nextRunAt.toISOString()); } else { this.attrs.nextRunAt = lastRun.valueOf() + humanInterval(interval); debug('[%s:%s] nextRunAt set to [%s]', this.attrs.name, this.attrs._id, this.attrs.nextRunAt.toISOString()); } } catch (e) {} } finally { if (isNaN(this.attrs.nextRunAt)) { this.attrs.nextRunAt = undefined; debug('[%s:%s] failed to calculate nextRunAt due to invalid repeat interval', this.attrs.name, this.attrs._id); this.fail('failed to calculate nextRunAt due to invalid repeat interval'); } } } /** * Internal method to compute next run time from the repeat string * @returns {undefined} */ function computeFromRepeatAt() { var lastRun = this.attrs.lastRunAt || new Date(); var nextDate = date(repeatAt).valueOf(); var offset = Date.now(); // if you do not specify offset date for below test it will fail for ms if (offset === date(repeatAt,offset).valueOf()) { this.attrs.nextRunAt = undefined; debug('[%s:%s] failed to calculate repeatAt due to invalid format', this.attrs.name, this.attrs._id); this.fail('failed to calculate repeatAt time due to invalid format'); } else if (nextDate.valueOf() === lastRun.valueOf()) { this.attrs.nextRunAt = date('tomorrow at ', repeatAt); debug('[%s:%s] nextRunAt set to [%s]', this.attrs.name, this.attrs._id, this.attrs.nextRunAt.toISOString()); } else { this.attrs.nextRunAt = date(repeatAt); debug('[%s:%s] nextRunAt set to [%s]', this.attrs.name, this.attrs._id, this.attrs.nextRunAt.toISOString()); } } }; /** * Sets a job to repeat every X amount of time * @param {String} interval repeat every X * @param {Object} options options to use for job * @returns {exports} instance of Job */ Job.prototype.repeatEvery = function(interval, options) { options = options || {}; this.attrs.repeatInterval = interval; this.attrs.repeatTimezone = options.timezone ? 
options.timezone : null; return this; }; /** * Sets a job to repeat at a specific time * @param {String} time time to repeat job at (human readable or number) * @returns {exports} instance of Job */ Job.prototype.repeatAt = function(time) { this.attrs.repeatAt = time; return this; }; /** * Prevents the job type from running * @returns {exports} instance of Job */ Job.prototype.disable = function() { this.attrs.disabled = true; return this; }; /** * Allows job type to run * @returns {exports} instance of Job */ Job.prototype.enable = function() { this.attrs.disabled = false; return this; }; /** * Data to ensure is unique for job to be created * @param {Object} unique mongo data query for unique * @param {Object} opts unique options * @returns {exports} instance of Job */ Job.prototype.unique = function(unique, opts) { this.attrs.unique = unique; this.attrs.uniqueOpts = opts; return this; }; /** * Schedules a job to run at specified time * @param {String} time schedule a job to run "then" * @returns {exports} instance of Job */ Job.prototype.schedule = function(time) { this.attrs.nextRunAt = (time instanceof Date) ? time : date(time); return this; }; /** * Sets priority of the job * @param {String} priority priority of when job should be queued * @returns {exports} instance of Job */ Job.prototype.priority = function(priority) { this.attrs.priority = parsePriority(priority); return this; }; /** * Fails the job with a reason (error) specified * @param {Error|String} reason reason job failed * @returns {exports} instance of Job */ Job.prototype.fail = function(reason) { if (reason instanceof Error) { reason = reason.message; } this.attrs.failReason = reason; this.attrs.failCount = (this.attrs.failCount || 0) + 1; var now = new Date(); this.attrs.failedAt = now; this.attrs.lastFinishedAt = now; debug('[%s:%s] fail() called [%d] times so far', this.attrs.name, this.attrs._id, this.attrs.failCount); return this; }; /** * Internal method (RUN) * @param {Function} cb called when job persistence in MongoDB fails or passes * @returns {undefined} */ Job.prototype.run = function(cb) { var self = this, agenda = self.agenda, definition = agenda._definitions[self.attrs.name]; var setImmediate = setImmediate || process.nextTick; // eslint-disable-line no-use-before-define setImmediate(function() { self.attrs.lastRunAt = new Date(); debug('[%s:%s] setting lastRunAt to: %s', self.attrs.name, self.attrs._id, self.attrs.lastRunAt.toISOString()); self.computeNextRunAt(); self.save(function() { var jobCallback = function(err) { if (err) { self.fail(err); } if (!err) self.attrs.lastFinishedAt = new Date(); self.attrs.lockedAt = null; debug('[%s:%s] job finished at [%s] and was unlocked', self.attrs.name, self.attrs._id, self.attrs.lastFinishedAt); self.save(function(saveErr, job) { cb && cb(err || saveErr, job); if (err) { agenda.emit('fail', err, self); agenda.emit('fail:' + self.attrs.name, err, self); debug('[%s:%s] failed to be saved to MongoDB', self.attrs.name, self.attrs._id); } else { agenda.emit('success', self); agenda.emit('success:' + self.attrs.name, self); debug('[%s:%s] was saved successfully to MongoDB', self.attrs.name, self.attrs._id); } agenda.emit('complete', self); agenda.emit('complete:' + self.attrs.name, self); debug('[%s:%s] job has finished', self.attrs.name, self.attrs._id); }); }; try { agenda.emit('start', self); agenda.emit('start:' + self.attrs.name, self); debug('[%s:%s] starting job', self.attrs.name, self.attrs._id); if (!definition) { debug('[%s:%s] has no definition, can not 
run', self.attrs.name, self.attrs._id); throw new Error('Undefined job'); } if (definition.fn.length === 2) { debug('[%s:%s] process function being called', self.attrs.name, self.attrs._id); definition.fn(self, jobCallback); } else { debug('[%s:%s] process function being called', self.attrs.name, self.attrs._id); definition.fn(self); jobCallback(); } } catch (e) { debug('[%s:%s] unknown error occurred', self.attrs.name, self.attrs._id); jobCallback(e); } }); }); }; /** * A job is running if: * (lastRunAt exists AND lastFinishedAt does not exist) * OR * (lastRunAt exists AND lastFinishedAt exists but the lastRunAt is newer [in time] than lastFinishedAt) * @returns {boolean} whether or not job is running at the moment (true for running) */ Job.prototype.isRunning = function() { if (!this.attrs.lastRunAt) return false; if (!this.attrs.lastFinishedAt) return true; if (this.attrs.lockedAt && this.attrs.lastRunAt.getTime() > this.attrs.lastFinishedAt.getTime()) { return true; } return false; }; /** * Saves a job into the MongoDB * @param {Function} cb called after job is saved or errors * @returns {exports} instance of Job */ Job.prototype.save = function(cb) { this.agenda.saveJob(this, cb); return this; }; /** * Remove the job from MongoDB * @param {Function} cb called when job removal fails or passes * @returns {undefined} */ Job.prototype.remove = function(cb) { this.agenda.cancel( {_id: this.attrs._id}, cb ); }; /** * Updates "lockedAt" time so the job does not get picked up again * @param {Function} cb called when job "touch" fails or passes * @returns {undefined} */ Job.prototype.touch = function(cb) { this.attrs.lockedAt = new Date(); this.save(cb); }; /** * Internal method to turn priority into a number * @param {String|Number} priority string to parse into number * @returns {Number} priority that was parsed */ function parsePriority(priority) { var priorityMap = { lowest: -20, low: -10, normal: 0, high: 10, highest: 20 }; if (typeof priority === 'number' || priority instanceof Number) { return priority; } else { return priorityMap[priority]; } }
computeFromInterval
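// Usage sketch for the Job API above. Hedged: `agenda` is an instance of the
// companion agenda.js module and `agenda.create` is assumed from it; the job
// name and data are illustrative.
//
//   var job = agenda.create('send email', {to: 'user@example.com'});
//   job.repeatEvery('5 minutes', {timezone: 'America/New_York'})
//      .priority('high')
//      .save(function (err) { /* persisted; run() computes the next nextRunAt */ });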
mod.rs
// Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT use super::{ElectionProof, Error, Ticket, TipsetKeys}; use address::Address; use beacon::{self, Beacon, BeaconEntry, BeaconSchedule}; use cid::{Cid, Code::Blake2b256}; use clock::ChainEpoch; use crypto::Signature; use derive_builder::Builder; use encoding::blake2b_256; use encoding::{Cbor, Error as EncodingError}; use fil_types::{PoStProof, BLOCKS_PER_EPOCH}; use num_bigint::{ bigint_ser::{BigIntDe, BigIntSer}, BigInt, }; use once_cell::sync::OnceCell; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use sha2::Digest; use std::fmt; use vm::TokenAmount; #[cfg(feature = "json")] pub mod json; const SHA_256_BITS: usize = 256; /// Header of a block /// /// Usage: /// ``` /// use forest_blocks::{BlockHeader, TipsetKeys, Ticket}; /// use address::Address; /// use cid::{Cid, Code::Identity}; /// use num_bigint::BigInt; /// use crypto::Signature; /// /// BlockHeader::builder() /// .messages(cid::new_from_cbor(&[], Identity)) // required /// .message_receipts(cid::new_from_cbor(&[], Identity)) // required /// .state_root(cid::new_from_cbor(&[], Identity)) // required /// .miner_address(Address::new_id(0)) // optional /// .beacon_entries(Vec::new()) // optional /// .winning_post_proof(Vec::new()) // optional /// .election_proof(None) // optional /// .bls_aggregate(None) // optional /// .signature(None) // optional /// .parents(TipsetKeys::default()) // optional /// .weight(BigInt::from(0u8)) // optional /// .epoch(0) // optional /// .timestamp(0) // optional /// .ticket(Some(Ticket::default())) // optional /// .fork_signal(0) // optional /// .build() /// .unwrap(); /// ``` #[derive(Clone, Debug, Builder)] #[builder(name = "BlockHeaderBuilder")] pub struct BlockHeader { // CHAIN LINKING /// Parents is the set of parents this block was based on. Typically one, /// but can be several in the case where there were multiple winning ticket- /// holders for an epoch #[builder(default)] parents: TipsetKeys, /// weight is the aggregate chain weight of the parent set #[builder(default)] weight: BigInt, /// epoch is the period in which a new block is generated. /// There may be multiple rounds in an epoch. #[builder(default)] epoch: ChainEpoch, /// BeaconEntries contain the verifiable oracle randomness used to elect /// this block's author leader #[builder(default)] beacon_entries: Vec<BeaconEntry>, /// PoStProofs are the winning post proofs #[builder(default)] winning_post_proof: Vec<PoStProof>, // MINER INFO /// miner_address is the address of the miner actor that mined this block miner_address: Address, // STATE /// messages contains the Cid to the merkle links for bls_messages and secp_messages #[builder(default)] messages: Cid, /// message_receipts is the Cid of the root of an array of MessageReceipts #[builder(default)] message_receipts: Cid, /// state_root is a cid pointer to the parent state root after calculating parent tipset. 
#[builder(default)] state_root: Cid, #[builder(default)] fork_signal: u64, #[builder(default)] pub signature: Option<Signature>, #[builder(default)] election_proof: Option<ElectionProof>, // CONSENSUS /// timestamp, in seconds since the Unix epoch, at which this block was created #[builder(default)] timestamp: u64, /// the ticket submitted with this block #[builder(default)] ticket: Option<Ticket>, // SIGNATURES /// aggregate signature of miner in block #[builder(default)] bls_aggregate: Option<Signature>, /// the base fee of the parent block #[builder(default)] parent_base_fee: TokenAmount, // CACHE /// stores the cid for the block after the first call to `cid()` #[builder(default, setter(skip))] cached_cid: OnceCell<Cid>, /// stores the hashed bytes of the block after the first call to `cached_bytes()` #[builder(default, setter(skip))] cached_bytes: OnceCell<Vec<u8>>, /// Cached signature validation #[builder(setter(skip), default)] is_validated: OnceCell<bool>, } impl PartialEq for BlockHeader { fn
(&self, other: &Self) -> bool { self.cid().eq(other.cid()) } } impl Cbor for BlockHeader { fn marshal_cbor(&self) -> Result<Vec<u8>, EncodingError> { Ok(self.cached_bytes().clone()) } fn cid(&self) -> Result<Cid, EncodingError> { Ok(*self.cid()) } } impl Serialize for BlockHeader { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { ( &self.miner_address, &self.ticket, &self.election_proof, &self.beacon_entries, &self.winning_post_proof, &self.parents, BigIntSer(&self.weight), &self.epoch, &self.state_root, &self.message_receipts, &self.messages, &self.bls_aggregate, &self.timestamp, &self.signature, &self.fork_signal, BigIntSer(&self.parent_base_fee), ) .serialize(serializer) } } impl<'de> Deserialize<'de> for BlockHeader { fn deserialize<D>(deserializer: D) -> Result<Self, <D as Deserializer<'de>>::Error> where D: Deserializer<'de>, { let ( miner_address, ticket, election_proof, beacon_entries, winning_post_proof, parents, BigIntDe(weight), epoch, state_root, message_receipts, messages, bls_aggregate, timestamp, signature, fork_signal, BigIntDe(parent_base_fee), ) = Deserialize::deserialize(deserializer)?; let header = BlockHeader { parents, weight, epoch, beacon_entries, winning_post_proof, miner_address, messages, message_receipts, state_root, fork_signal, signature, election_proof, timestamp, ticket, bls_aggregate, parent_base_fee, cached_bytes: Default::default(), cached_cid: Default::default(), is_validated: Default::default(), }; Ok(header) } } impl BlockHeader { /// Generates a BlockHeader builder as a constructor pub fn builder() -> BlockHeaderBuilder { BlockHeaderBuilder::default() } /// Getter for BlockHeader parents pub fn parents(&self) -> &TipsetKeys { &self.parents } /// Getter for BlockHeader weight pub fn weight(&self) -> &BigInt { &self.weight } /// Getter for BlockHeader epoch pub fn epoch(&self) -> ChainEpoch { self.epoch } /// Getter for Drand BeaconEntry pub fn beacon_entries(&self) -> &[BeaconEntry] { &self.beacon_entries } /// Getter for winning PoSt proof pub fn winning_post_proof(&self) -> &[PoStProof] { &self.winning_post_proof } /// Getter for BlockHeader miner_address pub fn miner_address(&self) -> &Address { &self.miner_address } /// Getter for BlockHeader messages pub fn messages(&self) -> &Cid { &self.messages } /// Getter for BlockHeader message_receipts pub fn message_receipts(&self) -> &Cid { &self.message_receipts } /// Getter for BlockHeader state_root pub fn state_root(&self) -> &Cid { &self.state_root } /// Getter for BlockHeader timestamp pub fn timestamp(&self) -> u64 { self.timestamp } /// Getter for BlockHeader ticket pub fn ticket(&self) -> &Option<Ticket> { &self.ticket } /// Getter for BlockHeader bls_aggregate pub fn bls_aggregate(&self) -> &Option<Signature> { &self.bls_aggregate } /// Getter for BlockHeader cid pub fn cid(&self) -> &Cid { self.cached_cid .get_or_init(|| cid::new_from_cbor(self.cached_bytes(), Blake2b256)) } /// Getter for BlockHeader parent_base_fee pub fn parent_base_fee(&self) -> &BigInt { &self.parent_base_fee } /// Getter for BlockHeader fork_signal pub fn fork_signal(&self) -> u64 { self.fork_signal } /// Getter for BlockHeader epost_verify pub fn election_proof(&self) -> &Option<ElectionProof> { &self.election_proof } /// Getter for BlockHeader signature pub fn signature(&self) -> &Option<Signature> { &self.signature } /// Key used for sorting headers and blocks. 
pub fn to_sort_key(&self) -> Option<([u8; 32], Vec<u8>)> { let ticket_hash = blake2b_256(self.ticket().as_ref()?.vrfproof.as_bytes()); Some((ticket_hash, self.cid().to_bytes())) } /// Updates cache and returns mutable reference of header back fn cached_bytes(&self) -> &Vec<u8> { self.cached_bytes .get_or_init(|| encoding::to_vec(self).expect("header serialization cannot fail")) } /// Check to ensure block signature is valid pub fn check_block_signature(&self, addr: &Address) -> Result<(), Error> { // If the block has already been validated, short circuit if let Some(true) = self.is_validated.get() { return Ok(()); } let signature = self .signature() .as_ref() .ok_or_else(|| Error::InvalidSignature("Signature is nil in header".to_owned()))?; signature .verify(&self.to_signing_bytes(), addr) .map_err(|e| Error::InvalidSignature(format!("Block signature invalid: {}", e)))?; // Set validated cache to true let _ = self.is_validated.set(true); Ok(()) } /// Returns true if (h(vrfout) * totalPower) < (e * sectorSize * 2^256) pub fn is_ticket_winner(ticket: &Ticket, mpow: BigInt, net_pow: BigInt) -> bool { /* Need to check that (h(vrfout) + 1) / (max(h) + 1) <= e * myPower / totalPower max(h) == 2^256-1 which in terms of integer math means: (h(vrfout) + 1) * totalPower <= e * myPower * 2^256 in 2^256 space, it is equivalent to: h(vrfout) * totalPower < e * myPower * 2^256 */ let h = sha2::Sha256::digest(ticket.vrfproof.as_bytes()); let mut lhs = BigInt::from_signed_bytes_be(&h); lhs *= net_pow; // rhs = sectorSize * 2^256 // rhs = sectorSize << 256 let mut rhs = mpow << SHA_256_BITS; rhs *= BigInt::from(BLOCKS_PER_EPOCH); // h(vrfout) * totalPower < e * sectorSize * 2^256 lhs < rhs } /// Validates if the current header's Beacon entries are valid to ensure randomness was generated correctly pub async fn validate_block_drand<B: Beacon>( &self, b_schedule: &BeaconSchedule<B>, parent_epoch: ChainEpoch, prev_entry: &BeaconEntry, ) -> Result<(), Error> { let (cb_epoch, curr_beacon) = b_schedule .beacon_for_epoch(self.epoch) .map_err(|e| Error::Validation(e.to_string()))?; let (pb_epoch, _) = b_schedule .beacon_for_epoch(parent_epoch) .map_err(|e| Error::Validation(e.to_string()))?; if cb_epoch != pb_epoch { // Fork logic if self.beacon_entries.len() != 2 { return Err(Error::Validation(format!( "Expected two beacon entries at beacon fork, got {}", self.beacon_entries.len() ))); } curr_beacon .verify_entry(&self.beacon_entries[1], &self.beacon_entries[0]) .await .map_err(|e| Error::Validation(e.to_string()))?; return Ok(()); } let max_round = curr_beacon.max_beacon_round_for_epoch(self.epoch); if max_round == prev_entry.round() { if !self.beacon_entries.is_empty() { return Err(Error::Validation(format!( "expected not to have any beacon entries in this block, got: {:?}", self.beacon_entries.len() ))); } return Ok(()); } let last = match self.beacon_entries.last() { Some(last) => last, None => { return Err(Error::Validation( "Block must include at least 1 beacon entry".to_string(), )); } }; if last.round() != max_round { return Err(Error::Validation(format!( "expected final beacon entry in block to be at round {}, got: {}", max_round, last.round() ))); } let mut prev = prev_entry; for curr in &self.beacon_entries { if !curr_beacon .verify_entry(curr, prev) .await .map_err(|e| Error::Validation(e.to_string()))? { return Err(Error::Validation(format!( "beacon entry was invalid: curr:{:?}, prev: {:?}", curr, prev ))); } prev = curr; } Ok(()) } /// Serializes the header to bytes for signing purposes i.e. 
without the signature field pub fn to_signing_bytes(&self) -> Vec<u8> { let mut blk = self.clone(); blk.signature = None; // This isn't required now, but future proofs for if the encoding ever uses a cache. blk.cached_bytes = Default::default(); blk.cached_cid = Default::default(); // * Intentionally not using cache here, to avoid using cached bytes with signature encoded. encoding::to_vec(&blk).expect("block serialization cannot fail") } } /// human-readable string representation of a block CID impl fmt::Display for BlockHeader { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "BlockHeader: {:?}", self.cid()) } } #[cfg(test)] mod tests { use crate::{errors::Error, BlockHeader}; use address::Address; use beacon::{BeaconEntry, BeaconPoint, BeaconSchedule, MockBeacon}; use encoding::Cbor; use std::sync::Arc; use std::time::Duration; #[test] fn symmetric_header_encoding() { // This test vector is pulled from space race, and contains a valid signature let bz = hex::decode("904300e8078158608798de4e49e02ee129920224ea767650aa6e693857431cc95b5a092a57d80ef4d841ebedbf09f7680a5e286cd297f40100b496648e1fa0fd55f899a45d51404a339564e7d4809741ba41d9fcc8ac0261bf521cd5f718389e81354eff2aa52b338201586084d8929eeedc654d6bec8bb750fcc8a1ebf2775d8167d3418825d9e989905a8b7656d906d23dc83e0dad6e7f7a193df70a82d37da0565ce69b776d995eefd50354c85ec896a2173a5efed53a27275e001ad72a3317b2190b98cceb0f01c46b7b81821a00013cbe5860ae1102b76dea635b2f07b7d06e1671d695c4011a73dc33cace159509eac7edc305fa74495505f0cd0046ee0d3b17fabc0fc0560d44d296c6d91bcc94df76266a8e9d5312c617ca72a2e186cadee560477f6d120f6614e21fb07c2390a166a25981820358c0b965705cec77b46200af8fb2e47c0eca175564075061132949f00473dcbe74529c623eb510081e8b8bd34418d21c646485d893f040dcfb7a7e7af9ae4ed7bd06772c24fb0cc5b8915300ab5904fbd90269d523018fbf074620fd3060d55dd6c6057b4195950ac4155a735e8fec79767f659c30ea6ccf0813a4ab2b4e60f36c04c71fb6c58efc123f60c6ea8797ab3706a80a4ccc1c249989934a391803789ab7d04f514ee0401d0f87a1f5262399c451dcf5f7ec3bb307fc6f1a41f5ff3a5ddb81d82a5827000171a0e402209a0640d0620af5d1c458effce4cbb8969779c9072b164d3fe6f5179d6378d8cd4300310001d82a5827000171a0e402208fbc07f7587e2efebab9ff1ab27c928881abf9d1b7e5ad5206781415615867aed82a5827000171a0e40220e5658b3d18cd06e1db9015b4b0ec55c123a24d5be1ea24d83938c5b8397b4f2fd82a5827000171a0e402209967f10c4c0e336b3517d3a972f701dadea5b41ce33defb126b88e650cf884545861028ec8b64e2d93272f97edcab1f56bcad4a2b145ea88c232bfae228e4adbbd807e6a41740cc8cb569197dae6b2cbf8c1a4035e81fd7805ccbe88a5ec476bcfa438db4bd677de06b45e94310533513e9d17c635940ba8fa2650cdb34d445724c5971a5f44387e5861028a45c70a39fe8e526cbb6ba2a850e9063460873d6329f26cc2fc91972256c40249dba289830cc99619109c18e695d78012f760e7fda1b68bc3f1fe20ff8a017044753da38ca6384de652f3ee13aae5b64e6f88f85fd50d5c862fed3c1f594ace004500053724e0").unwrap(); let header = BlockHeader::unmarshal_cbor(&bz).unwrap(); assert_eq!(header.marshal_cbor().unwrap(), bz); // Verify the signature of this block header using the resolved address used to sign. // This is a valid signature, but if the block header vector changes, the address should // need to as well. 
header .check_block_signature( &"f3vfs6f7tagrcpnwv65wq3leznbajqyg77bmijrpvoyjv3zjyi3urq25vigfbs3ob6ug5xdihajumtgsxnz2pa" .parse() .unwrap()) .unwrap(); } #[test] fn beacon_entry_exists() { // Setup let block_header = BlockHeader::builder() .miner_address(Address::new_id(0)) .beacon_entries(Vec::new()) .build() .unwrap(); let beacon_schedule = Arc::new(BeaconSchedule(vec![BeaconPoint { height: 0, beacon: Arc::new(MockBeacon::new(Duration::from_secs(1))), }])); let chain_epoch = 0; let beacon_entry = BeaconEntry::new(1, vec![]); // Validate_block_drand if let Err(e) = async_std::task::block_on(block_header.validate_block_drand( &beacon_schedule, chain_epoch, &beacon_entry, )) { // Assert error is for not including a beacon entry in the block match e { Error::Validation(why) => { assert_eq!(why, "Block must include at least 1 beacon entry"); } _ => { panic!("validate block drand must detect a beacon entry in the block header"); } } } } }
eq
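// Signing-flow sketch for the header above. Hedged: `header`, `key` and `sign`
// are illustrative; only the BlockHeader methods come from this module.
//
//   let unsigned = header.to_signing_bytes();          // CBOR without `signature`
//   // header.signature = Some(sign(&key, &unsigned)); // miner side (illustrative)
//   // header.check_block_signature(&miner_addr)?;     // validator side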
api.py
# # Copyright (c) dushin.net All Rights Reserved # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of dushin.net nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import logging import uhttpd class Handler : def __init__(self, controller, verbose=False): self.controller = controller self.verbose = verbose def get(self, api_request):
@staticmethod def get_path(tree, path, all=False) : for c in path : if c in tree : tree = tree[c] else : raise uhttpd.NotFoundException("Invalid path: {}; '{}' not found.".format(path, c)) return Handler.serialize(tree) if not all else tree @staticmethod def serialize(node) : node_type = type(node) if node_type is dict : return Handler.list_keys(node) else : return node @staticmethod def list_keys(node) : ret = [] for key in node.keys() : ret.append(key) return ret def post(self, api_request): if self.verbose : logging.info('post: api_request={}', api_request) context = api_request['context'] if len(context) > 0 : query_params = api_request['query_params'] operator = context[0] if operator == 'mode' : self.controller.set_mode(query_params['mode']) elif operator == 'np' : pin = None num_pixels = None if 'pin' in query_params : pin = query_params['pin'] if 'num_pixels' in query_params : num_pixels = query_params['num_pixels'] self.controller.set_np(pin=pin, num_pixels=num_pixels) elif operator == 'lamp' : if 'color_name' not in query_params : raise uhttpd.BadRequestException("Expected name in query_params") self.controller.set_color_name(query_params['color_name']) elif operator == 'schedule' : if 'name' not in query_params : raise uhttpd.BadRequestException("Expected name in query_params") self.controller.update_schedule(query_params['name'], api_request['body']) elif operator == 'colorspec' : if 'name' not in query_params : raise uhttpd.BadRequestException("Expected name in query_params") self.controller.set_colorspec(query_params['name'], api_request['body']) elif operator == 'color' : self.controller.set_color(( int(query_params['r']), int(query_params['g']), int(query_params['b']) )) elif operator == 'reboot' : self.controller.reboot() elif operator == 'reset' : self.controller.reset() else : raise uhttpd.BadRequestException("Bad post request: Unknown operator: {}".format(operator)) else : raise uhttpd.BadRequestException("Bad post request: Missing operator in context") def delete(self, api_request): context = api_request['context'] if len(context) > 0 : query_params = api_request['query_params'] operator = context[0] if operator == 'schedule' : if 'name' not in query_params : raise uhttpd.BadRequestException("Expected name in query_params") self.controller.delete_schedule(query_params['name']) elif operator == 'colorspec' : if 'name' not in query_params : raise uhttpd.BadRequestException("Expected name in query_params") self.controller.delete_colorspec(query_params['name']) else : raise uhttpd.BadRequestException("Bad delete request: Unknown operator: {}".format(operator)) else : raise uhttpd.BadRequestException("Bad delete request: Missing operator in context")
        context = api_request['context']
        if len(context) > 0 :
            if context[0] == 'config' :
                query_params = api_request['query_params']
                return Handler.get_path(self.controller.config, context[1:], 'all' in query_params and query_params['all'] == 'true')
            elif context[0] == 'stats' :
                return self.controller.get_stats()
            elif context[0] == 'color' :
                return self.controller.get_color()
            else :
                raise uhttpd.BadRequestException("Bad get request: Unknown operator: {}".format(context[0]))
        else :
            raise uhttpd.BadRequestException("Bad get request: Missing operator in context")
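# Request-shape sketch for the handlers above. Hedged: the '/api' mount point and
# the exact api_request layout are assumptions inferred from how this class reads
# it; uhttpd is expected to supply 'context' (remaining path segments),
# 'query_params' and 'body'.
#
#   GET    /api/config/lamp?all=true  ->  get_path(controller.config, ['lamp'], True)
#   GET    /api/stats                 ->  controller.get_stats()
#   POST   /api/color?r=255&g=0&b=0   ->  controller.set_color((255, 0, 0))
#   DELETE /api/schedule?name=night   ->  controller.delete_schedule('night')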
sdk.go
package states import ( "github.com/deepvalue-network/software/blockchain/domain/blocks" "github.com/deepvalue-network/software/blockchain/domain/chains" "github.com/deepvalue-network/software/bobby/domain/resources" "github.com/deepvalue-network/software/bobby/domain/states/overviews" "github.com/deepvalue-network/software/bobby/domain/transactions" "github.com/deepvalue-network/software/libs/hash" ) // Builder represents a state builder type Builder interface { Create() Builder WithTransactionsList(transactionsList []transactions.Transactions) Builder
Now() (State, error) } // State represents a database state type State interface { Resource() resources.Immutable Transactions() []transactions.Transactions Block() blocks.Block HasPrevious() bool Previous() *hash.Hash } // Repository represents the state repository type Repository interface { Last() (State, error) Retrieve(hash hash.Hash) (State, error) } // ServiceBuilder represents a service builder type ServiceBuilder interface { Create() ServiceBuilder WithChain(chain chains.Chain) ServiceBuilder Now() (Service, error) } // Service represents the state service type Service interface { Prepare(state State) ([]overviews.Overview, error) Save(hash hash.Hash) error }
WithPrevious(prev hash.Hash) Builder
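// Builder usage sketch. Hedged: how a concrete Builder is obtained is outside
// this file; `NewBuilder`, `trxLists` and `prevHash` are illustrative names.
//
//	state, err := NewBuilder().Create().
//		WithTransactionsList(trxLists).
//		WithPrevious(prevHash).
//		Now()
//	if err == nil && state.HasPrevious() {
//		_ = state.Previous() // *hash.Hash of the prior state
//	}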
discovery.go
// Copyright 2015 The Vanadium Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package discovery import ( "errors" "io/ioutil" "net/http" "strings" ) func FindNamespace(host string) (string, error) { //Attempt to access the host website resp, err := http.Get(host) if err == nil {
			body := string(bytes)
			//Parse the body looking for "v23.namespace.root="
			//If found, return the value after it
			//Else keep looking
			if strings.Contains(body, "v23.namespace.root=") {
				//format the return string
				//namespaces[0] is the text before the first match; each later
				//element begins with the text that follows one match
				namespaces := strings.SplitAfter(body, "v23.namespace.root=")
				for i := 1; i < len(namespaces); i++ {
					namespaceWithJunk := namespaces[i]
					namespace := strings.SplitAfter(namespaceWithJunk, "\n")[0]
					cleanNamespace := strings.TrimSpace(namespace)
					if cleanNamespace != "" {
						return cleanNamespace, nil
					}
				}
			}
		}
	}
	//no instance of "v23.namespace.root=" was found
	return "", errors.New("No namespace found")
}
		defer resp.Body.Close()
		bytes, err := ioutil.ReadAll(resp.Body)
		if err == nil {
			//Successfully accessed the website!
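// Input sketch for the parser above: the fetched page body is expected to
// contain a line like the one below (the value shown is illustrative):
//
//	v23.namespace.root=/ns.dev.v.io:8101
//
// FindNamespace("http://host") would then return "/ns.dev.v.io:8101".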
jquery.farbtastic.js
/** * Farbtastic Color Picker 1.2 * © 2008 Steven Wittens * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ //Adapted to uniform style with jQuery UI widgets and slightly change behavior //TODO: // - remove duplicated code by replacing it with jquery.colorUtils and modern jQuery // - uniform code style jQuery.fn.farbtastic = function (callback) { $.farbtastic(this, callback); return this; }; jQuery.farbtastic = function (container, callback) { var container = $(container).get(0); return container.farbtastic || (container.farbtastic = new jQuery._farbtastic(container, callback)); } jQuery._farbtastic = function (container, callback) { // Store farbtastic object var fb = this; // Insert markup $(container).html('<div class="farbtastic ui-widget-content"><div class="color"></div><div class="wheel"></div><div class="overlay"></div><div class="h-marker marker"></div><div class="sl-marker marker"></div></div>'); $(container).addClass('ui-widget'); var e = $('.farbtastic', container); fb.wheel = $('.wheel', container).get(0); // Dimensions fb.radius = 84; fb.square = 100; fb.width = 194; // Fix background PNGs in IE6 if (navigator.appVersion.match(/MSIE [0-6]\./)) { $('*', e).each(function () { if (this.currentStyle.backgroundImage != 'none') { var image = this.currentStyle.backgroundImage; image = this.currentStyle.backgroundImage.slice(5, image.length - 2); $(this).css( { backgroundImage: 'none', filter: "progid:DXImageTransform.Microsoft.AlphaImageLoader(enabled=true, sizingMethod=crop, src='" + image + "')" } ); } }); } /** * Link to the given element(s) or callback. */ fb.linkTo = function (callback) { // Unbind previous nodes if (typeof fb.callback == 'object') { $(fb.callback).unbind('keyup', fb.updateValue); } // Reset color fb.color = null; // Bind callback or elements if (typeof callback == 'function') { fb.callback = callback; } else if (typeof callback == 'object' || typeof callback == 'string') { fb.callback = $(callback); fb.callback.bind('keyup', fb.updateValue); if (fb.callback.get(0).value) { fb.setColor(fb.callback.get(0).value); } } return this; } fb.updateValue = function (event) { if (this.value != fb.color) { fb.setColor(this.value); } } /** * Change color with HTML syntax #123456 */ fb.setColor = function (color) { var rgb = $.colorUtil.getRGB( color ); if (fb.color != color && rgb) { rgb = rgb.slice( 0 ); //make a clone //TODO: rewrite code so that this is not needed rgb[0] /= 255; rgb[1] /= 255; rgb[2] /= 255; fb.color = color; fb.rgb = rgb; fb.hsl = fb.RGBToHSL(fb.rgb); fb.updateDisplay(); } return this; } /**
fb.hsl = hsl; fb.rgb = fb.HSLToRGB(hsl); fb.color = fb.pack(fb.rgb); fb.updateDisplay(); return this; } ///////////////////////////////////////////////////// /** * Retrieve the coordinates of the given event relative to the center * of the widget. */ fb.widgetCoords = function (event) { var ref = $( fb.wheel ).offset(); return { x: event.pageX - ref.left - fb.width / 2, y: event.pageY - ref.top - fb.width / 2 }; } /** * Mousedown handler */ fb.mousedown = function (event) { // Capture mouse if (!document.dragging) { $(document).bind('mousemove', fb.mousemove).bind('mouseup', fb.mouseup); document.dragging = true; } // Check which area is being dragged var pos = fb.widgetCoords(event); fb.circleDrag = Math.max(Math.abs(pos.x), Math.abs(pos.y)) * 2 > fb.square; // Process fb.mousemove(event); return false; } /** * Mousemove handler */ fb.mousemove = function (event) { // Get coordinates relative to color picker center var pos = fb.widgetCoords(event); // Set new HSL parameters if (fb.circleDrag) { var hue = Math.atan2(pos.x, -pos.y) / 6.28; if (hue < 0) hue += 1; fb.setHSL([hue, fb.hsl[1], fb.hsl[2]]); } else { var sat = Math.max(0, Math.min(1, -(pos.x / fb.square) + .5)); var lum = Math.max(0, Math.min(1, -(pos.y / fb.square) + .5)); fb.setHSL([fb.hsl[0], sat, lum]); } return false; } /** * Mouseup handler */ fb.mouseup = function () { // Uncapture mouse $(document).unbind('mousemove', fb.mousemove); $(document).unbind('mouseup', fb.mouseup); document.dragging = false; } /** * Update the markers and styles */ fb.updateDisplay = function () { // Markers var angle = fb.hsl[0] * 6.28; $('.h-marker', e).css({ left: Math.round(Math.sin(angle) * fb.radius + fb.width / 2) + 'px', top: Math.round(-Math.cos(angle) * fb.radius + fb.width / 2) + 'px' }); $('.sl-marker', e).css({ left: Math.round(fb.square * (.5 - fb.hsl[1]) + fb.width / 2) + 'px', top: Math.round(fb.square * (.5 - fb.hsl[2]) + fb.width / 2) + 'px' }); // Saturation/Luminance gradient $('.color', e).css('backgroundColor', fb.pack(fb.HSLToRGB([fb.hsl[0], 1, 0.5]))); // Linked elements or callback if (typeof fb.callback == 'object') { // Set background/foreground color $(fb.callback).css({ backgroundColor: fb.color, color: fb.hsl[2] > 0.5 ? '#000' : '#fff' }); // Change linked value $(fb.callback).each(function() { if ( $( this ).val() != fb.color) { $( this ).val( fb.color ).change(); } }); } else if (typeof fb.callback == 'function') { fb.callback.call(fb, fb.color); } } /* Various color utility functions */ fb.pack = function (rgb) { var r = Math.round(rgb[0] * 255); var g = Math.round(rgb[1] * 255); var b = Math.round(rgb[2] * 255); return '#' + (r < 16 ? '0' : '') + r.toString(16) + (g < 16 ? '0' : '') + g.toString(16) + (b < 16 ? '0' : '') + b.toString(16); } fb.HSLToRGB = function (hsl) { var m1, m2, r, g, b; var h = hsl[0], s = hsl[1], l = hsl[2]; m2 = (l <= 0.5) ? l * (s + 1) : l + s - l*s; m1 = l * 2 - m2; return [this.hueToRGB(m1, m2, h+0.33333), this.hueToRGB(m1, m2, h), this.hueToRGB(m1, m2, h-0.33333)]; } fb.hueToRGB = function (m1, m2, h) { h = (h < 0) ? h + 1 : ((h > 1) ? h - 1 : h); if (h * 6 < 1) return m1 + (m2 - m1) * h * 6; if (h * 2 < 1) return m2; if (h * 3 < 2) return m1 + (m2 - m1) * (0.66666 - h) * 6; return m1; } fb.RGBToHSL = function (rgb) { var min, max, delta, h, s, l; var r = rgb[0], g = rgb[1], b = rgb[2]; min = Math.min(r, Math.min(g, b)); max = Math.max(r, Math.max(g, b)); delta = max - min; l = (min + max) / 2; s = 0; if (l > 0 && l < 1) { s = delta / (l < 0.5 ? 
(2 * l) : (2 - 2 * l)); } h = 0; if (delta > 0) { if (max == r && max != g) h += (g - b) / delta; if (max == g && max != b) h += (2 + (b - r) / delta); if (max == b && max != r) h += (4 + (r - g) / delta); h /= 6; } return [h, s, l]; } // Install mousedown handler (the others are set on the document on-demand) $('*', e).mousedown(fb.mousedown); // Init color fb.setColor('#000000'); // Set linked elements/callback if (callback) { fb.linkTo(callback); } }
* Change color with HSL triplet [0..1, 0..1, 0..1] */ fb.setHSL = function (hsl) {
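// Usage sketch (hedged: the element IDs are illustrative; both entry points are
// the ones defined at the top of this file):
//
//   $('#picker').farbtastic('#color-input');      // keep an <input> in sync
//   $.farbtastic('#picker', function (color) {    // or receive each picked color
//     console.log('picked', color);               // e.g. "#1a2b3c"
//   });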
filelog.go
// ---------------------------------------------------------------------------
//
// filelog.go
//
// Copyright (c) 2014, Jared Chavez.
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// -----------

package log

// External imports.
import (
	"github.com/xaevman/goat/lib/fs"
	"github.com/xaevman/goat/lib/lifecycle"
	"github.com/xaevman/goat/lib/perf"
	"github.com/xaevman/goat/lib/time"
)

// Stdlib imports.
import (
	"os"
	"path/filepath"
	stdtime "time"
)

// Perf counters.
const (
	PERF_FLOG_CRASH_BYTES = iota
	PERF_FLOG_DEBUG_BYTES
	PERF_FLOG_ERROR_BYTES
	PERF_FLOG_INFO_BYTES
	PERF_FLOG_FLUSH
	PERF_FLOG_TIMER_FLUSH
	PERF_FLOG_TIMER_IDLE
	PERF_FLOG_COUNT
)

// Perf counter friendly names.
var perfNames = []string {
	"CrashLogSizeBytes",
	"DebugLogSizeBytes",
	"ErrorLogSizeBytes",
	"InfoLogSizeBytes",
	"ManualFlush",
	"TimerFlushMs",
	"TimerIdleMs",
}

// Default config options
const (
	DEFAULT_BUFFER_DEPTH      = 10000
	DEFAULT_FLUSH_INTERVAL_MS = 1 * 1000
	DEFAULT_LOG_DIR           = "log"
)

// Log file names
const (
	CRASH_LOG_NAME = "crash.log"
	DEBUG_LOG_NAME = "debug.log"
	ERROR_LOG_NAME = "error.log"
	INFO_LOG_NAME  = "info.log"
)

// FileLog module name
const FL_MOD_NAME = "FileLog"

// InitFileLog creates a new FileLog instance, initializes its members,
// registers it with the log service, and spawns a goroutine which is
// responsible for periodically flushing logs to disk.
func
() { fileLog := FileLog { FlushIntervalMs: DEFAULT_FLUSH_INTERVAL_MS, crash : make(chan string, DEFAULT_BUFFER_DEPTH), debug : make(chan string, DEFAULT_BUFFER_DEPTH), error : make(chan string, DEFAULT_BUFFER_DEPTH), flush : make(chan bool, 1), info : make(chan string, DEFAULT_BUFFER_DEPTH), perfs : perf.NewCounterSet( "Module.Log." + FL_MOD_NAME, PERF_FLOG_COUNT, perfNames, ), syncObj : lifecycle.New(), } RegisterLogSubscriber(&fileLog) Crash("<Log init>") Debug("<Log init>") Error("<Log init>") Info("<Log init>") go fileLog.init() } // FileLog represents a LogSubscriber which is responsible for // coordinating writing logged messages to disk. type FileLog struct { FlushIntervalMs int crash chan string crashFile *os.File debug chan string debugFile *os.File error chan string errorFile *os.File flush chan bool info chan string infoFile *os.File perfs *perf.CounterSet syncObj *lifecycle.Lifecycle } // Crash writes a log message to the crash log buffer. func (this *FileLog) Crash(msg string) { this.crash <- msg } // Debug writes a log message to the debug log buffer. func (this *FileLog) Debug(msg string) { this.debug <- msg } // Error writes a log message to the error log buffer. func (this *FileLog) Error(msg string) { this.error <- msg } // Flush triggers a log flush, causing messages to be flushed to their // respective log files. func (this *FileLog) Flush() { this.flush <- true } // Info writes a log message to the info log buffer. func (this *FileLog) Info(msg string) { this.info <- msg } // Name returns this module's name. func (this *FileLog) Name() string { return FL_MOD_NAME } // Shutdown signals the log flush goroutine for shutdown and waits for it // to finish flushing to disk before returning. func (this *FileLog) Shutdown() { this.syncObj.Shutdown() } // flushLogs picks up all buffered log messages and writes them through to // their respective files on disk. func (this *FileLog) flushLogs() { for { select { case msg := <- this.crash: this.writeLog(this.crashFile, msg) case msg := <- this.debug: this.writeLog(this.debugFile, msg) case msg := <- this.error: this.writeLog(this.errorFile, msg) case msg := <- this.info: this.writeLog(this.infoFile, msg) default: return } } } // getFileStats is fired by a timer once per minute and updates the FileLog's // perf counters with information about log file sizes. func (this *FileLog) getFileStats() { this.perfs.Set( PERF_FLOG_CRASH_BYTES, fs.GetFileSize(filepath.Join(DEFAULT_LOG_DIR, CRASH_LOG_NAME)), ) this.perfs.Set( PERF_FLOG_DEBUG_BYTES, fs.GetFileSize(filepath.Join(DEFAULT_LOG_DIR, DEBUG_LOG_NAME)), ) this.perfs.Set( PERF_FLOG_ERROR_BYTES, fs.GetFileSize(filepath.Join(DEFAULT_LOG_DIR, ERROR_LOG_NAME)), ) this.perfs.Set( PERF_FLOG_INFO_BYTES, fs.GetFileSize(filepath.Join(DEFAULT_LOG_DIR, INFO_LOG_NAME)), ) stdtime.AfterFunc(1 * stdtime.Minute, this.getFileStats) } // init runs in a separate goroutine. It ensures that the log directory is // created, opens the log files for write access, and then responds to timed // and manual flush requests to write buffered data through to those files. // Once signaled for shutdown, init flushes all remaining logs, closes the files // and signals its completion. 
func (this *FileLog) init() { stopwatch := new(time.Stopwatch) this.perfs.EnableStats(PERF_FLOG_TIMER_FLUSH) this.perfs.EnableStats(PERF_FLOG_TIMER_IDLE) fs.Mkdir(DEFAULT_LOG_DIR, 0755) this.crashFile = this.initLog(CRASH_LOG_NAME) this.debugFile = this.initLog(DEBUG_LOG_NAME) this.errorFile = this.initLog(ERROR_LOG_NAME) this.infoFile = this.initLog(INFO_LOG_NAME) stdtime.AfterFunc(1 * stdtime.Minute, this.getFileStats) this.syncObj.StartHeart(this.FlushIntervalMs) // run until shutdown for this.syncObj.QueryRun() { select { // manual flush case <-this.flush: this.perfs.Set(PERF_FLOG_TIMER_IDLE, stopwatch.MarkMs()) stopwatch.Restart() this.flushLogs() this.perfs.Set(PERF_FLOG_TIMER_FLUSH, stopwatch.MarkMs()) // timed flush case <-this.syncObj.QueryHeartbeat(): this.perfs.Set(PERF_FLOG_TIMER_IDLE, stopwatch.MarkMs()) stopwatch.Restart() this.flushLogs() this.perfs.Set(PERF_FLOG_TIMER_FLUSH, stopwatch.MarkMs()) // shutdown case <-this.syncObj.QueryShutdown(): this.perfs.Set(PERF_FLOG_TIMER_IDLE, stopwatch.MarkMs()) } stopwatch.Restart() } // shutdown this.flushLogs() this.crashFile.Close() this.debugFile.Close() this.errorFile.Close() this.infoFile.Close() this.syncObj.ShutdownComplete() } // initLog opens or creates a given log file for append access. func (this *FileLog) initLog(filePath string) *os.File { file, err := fs.AppendFile(filepath.Join(DEFAULT_LOG_DIR, filePath)) if err != nil { Error("Unable to initialize log file %v", filePath) this.Shutdown() return nil } return file } // writeLog writes the formatted log message msg through to the supplied file // handle. func (this *FileLog) writeLog(logFile *os.File, msg string) { if logFile == nil { return } logFile.WriteString(msg + "\n") }
InitFileLog
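// Usage sketch (hedged: the package-level Crash/Debug/Error/Info helpers are the
// ones this file already calls during init; the message is illustrative):
//
//	log.InitFileLog()     // creates log/, registers the subscriber, starts flushing
//	log.Info("server up") // buffered; written within FlushIntervalMs or on Flush()
//	// on shutdown, the flush goroutine drains the buffers and closes the files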
mnemonic.rs
// Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //! # BIP39 Implementation //! //! Implementation of BIP39 Mnemonic code for generating deterministic keys, as defined //! at https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki use digest::Digest; use hmac::Hmac; use pbkdf2::pbkdf2; use sha2::{Sha256, Sha512}; use std::fmt; lazy_static! { /// List of bip39 words pub static ref WORDS: Vec<String> = { include_str!("wordlists/en.txt").split_whitespace().map(|s| s.into()).collect() }; } /// An error that might occur during mnemonic decoding #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub enum Error { /// Invalid word encountered BadWord(String), /// Checksum was not correct (expected, actual) BadChecksum(u8, u8), /// The number of words/bytes was invalid InvalidLength(usize), } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Error::BadWord(ref b) => write!(f, "invalid bip39 word {}", b), Error::BadChecksum(exp, actual) => write!( f, "checksum 0x{:x} does not match expected 0x{:x}", actual, exp ), Error::InvalidLength(ell) => write!(f, "invalid mnemonic/entropy length {}", ell), } } } /// Returns the index of a word in the wordlist pub fn search(word: &str) -> Result<u16, Error> { let w = word.to_string(); match WORDS.binary_search(&w) { Ok(index) => Ok(index as u16), Err(_) => Err(Error::BadWord(w)), } } /// Converts a mnemonic to entropy pub fn to_entropy(mnemonic: &str) -> Result<Vec<u8>, Error> { let words: Vec<String> = mnemonic.split_whitespace().map(|s| s.into()).collect(); let sizes: [usize; 5] = [12, 15, 18, 21, 24]; if !sizes.contains(&words.len()) { return Err(Error::InvalidLength(words.len())); } // u11 vector of indexes for each word let mut indexes: Vec<u16> = r#try!(words.iter().map(|x| search(x)).collect()); let checksum_bits = words.len() / 3; let mask = ((1 << checksum_bits) - 1) as u8; let last = indexes.pop().unwrap(); let checksum = (last as u8) & mask; let datalen = ((11 * words.len()) - checksum_bits) / 8 - 1; let mut entropy: Vec<u8> = vec![0; datalen]; // set the last byte to the data part of the last word entropy.push((last >> checksum_bits) as u8); // start setting bits from this index let mut loc: usize = 11 - checksum_bits; // cast vector of u11 as u8 for index in indexes.iter().rev() { for i in 0..11 { let bit = index & (1 << i) != 0; entropy[datalen - loc / 8] |= (bit as u8) << (loc % 8); loc += 1; } } let mut hash = [0; 32]; let mut sha2sum = Sha256::default(); sha2sum.input(&entropy.clone()); hash.copy_from_slice(sha2sum.result().as_slice()); let actual = (hash[0] >> (8 - checksum_bits)) & mask; if actual != checksum { return Err(Error::BadChecksum(checksum, actual)); } Ok(entropy) } /// Converts entropy to a mnemonic pub fn from_entropy(entropy: &Vec<u8>) -> Result<String, Error> { let sizes: [usize; 5] = [16, 20, 24, 28, 32]; let length = entropy.len(); if !sizes.contains(&length) { return Err(Error::InvalidLength(length)); } let 
checksum_bits = length / 4; let mask = ((1 << checksum_bits) - 1) as u8; let mut hash = [0; 32]; let mut sha2sum = Sha256::default(); sha2sum.input(&entropy.clone()); hash.copy_from_slice(sha2sum.result().as_slice()); let checksum = (hash[0] >> 8 - checksum_bits) & mask; let nwords = (length * 8 + checksum_bits) / 11; let mut indexes: Vec<u16> = vec![0; nwords]; let mut loc: usize = 0; // u8 to u11 for byte in entropy.iter() { for i in (0..8).rev() { let bit = byte & (1 << i) != 0; indexes[loc / 11] |= (bit as u16) << (10 - (loc % 11)); loc += 1; } } for i in (0..checksum_bits).rev() { let bit = checksum & (1 << i) != 0; indexes[loc / 11] |= (bit as u16) << (10 - (loc % 11)); loc += 1; } let words: Vec<String> = indexes.iter().map(|x| WORDS[*x as usize].clone()).collect(); let mnemonic = words.join(" "); Ok(mnemonic.to_owned()) } /// Converts a nemonic and a passphrase into a seed pub fn to_seed<'a, T: 'a>(mnemonic: &str, passphrase: T) -> Result<[u8; 64], Error> where Option<&'a str>: From<T>, { // make sure the mnemonic is valid r#try!(to_entropy(mnemonic)); let salt = ("mnemonic".to_owned() + Option::from(passphrase).unwrap_or("")).into_bytes(); let data = mnemonic.as_bytes(); let mut seed = [0; 64]; pbkdf2::<Hmac<Sha512>>(data, &salt[..], 2048, &mut seed); Ok(seed) } #[cfg(test)] mod tests { use super::{from_entropy, to_entropy, to_seed}; use crate::util::{from_hex, to_hex}; use rand::{thread_rng, Rng}; struct Test<'a> { mnemonic: &'a str, entropy: &'a str, seed: &'a str, } /// Test vectors from https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#Test_vectors fn tests<'a>() -> Vec<Test<'a>> { vec![ Test { entropy: "00000000000000000000000000000000", mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about", seed: "c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", }, Test { entropy: "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f", mnemonic: "legal winner thank year wave sausage worth useful legal winner thank yellow", seed: "2e8905819b8723fe2c1d161860e5ee1830318dbf49a83bd451cfb8440c28bd6fa457fe1296106559a3c80937a1c1069be3a3a5bd381ee6260e8d9739fce1f607", }, Test { entropy: "80808080808080808080808080808080", mnemonic: "letter advice cage absurd amount doctor acoustic avoid letter advice cage above", seed: "d71de856f81a8acc65e6fc851a38d4d7ec216fd0796d0a6827a3ad6ed5511a30fa280f12eb2e47ed2ac03b5c462a0358d18d69fe4f985ec81778c1b370b652a8", }, Test { entropy: "ffffffffffffffffffffffffffffffff", mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong", seed: "ac27495480225222079d7be181583751e86f571027b0497b5b5d11218e0a8a13332572917f0f8e5a589620c6f15b11c61dee327651a14c34e18231052e48c069", }, Test { entropy: "000000000000000000000000000000000000000000000000", mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent", seed: "035895f2f481b1b0f01fcf8c289c794660b289981a78f8106447707fdd9666ca06da5a9a565181599b79f53b844d8a71dd9f439c52a3d7b3e8a79c906ac845fa", }, Test { entropy: "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f", mnemonic: "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will", seed: "f2b94508732bcbacbcc020faefecfc89feafa6649a5491b8c952cede496c214a0c7b3c392d168748f2d4a612bada0753b52a1c7ac53c1e93abd5c6320b9e95dd", }, Test { entropy: "808080808080808080808080808080808080808080808080", mnemonic: "letter advice 
cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always", seed: "107d7c02a5aa6f38c58083ff74f04c607c2d2c0ecc55501dadd72d025b751bc27fe913ffb796f841c49b1d33b610cf0e91d3aa239027f5e99fe4ce9e5088cd65", }, Test { entropy: "ffffffffffffffffffffffffffffffffffffffffffffffff", mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo when", seed: "0cd6e5d827bb62eb8fc1e262254223817fd068a74b5b449cc2f667c3f1f985a76379b43348d952e2265b4cd129090758b3e3c2c49103b5051aac2eaeb890a528", }, Test { entropy: "0000000000000000000000000000000000000000000000000000000000000000", mnemonic: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art", seed: "bda85446c68413707090a52022edd26a1c9462295029f2e60cd7c4f2bbd3097170af7a4d73245cafa9c3cca8d561a7c3de6f5d4a10be8ed2a5e608d68f92fcc8", }, Test { entropy: "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f", mnemonic: "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title", seed: "bc09fca1804f7e69da93c2f2028eb238c227f2e9dda30cd63699232578480a4021b146ad717fbb7e451ce9eb835f43620bf5c514db0f8add49f5d121449d3e87", }, Test { entropy: "8080808080808080808080808080808080808080808080808080808080808080", mnemonic: "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless", seed: "c0c519bd0e91a2ed54357d9d1ebef6f5af218a153624cf4f2da911a0ed8f7a09e2ef61af0aca007096df430022f7a2b6fb91661a9589097069720d015e4e982f", }, Test { entropy: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", mnemonic: "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote", seed: "dd48c104698c30cfe2b6142103248622fb7bb0ff692eebb00089b32d22484e1613912f0a5b694407be899ffd31ed3992c456cdf60f5d4564b8ba3f05a69890ad", }, Test { entropy: "9e885d952ad362caeb4efe34a8e91bd2", mnemonic: "ozone drill grab fiber curtain grace pudding thank cruise elder eight picnic", seed: "274ddc525802f7c828d8ef7ddbcdc5304e87ac3535913611fbbfa986d0c9e5476c91689f9c8a54fd55bd38606aa6a8595ad213d4c9c9f9aca3fb217069a41028", }, Test { entropy: "6610b25967cdcca9d59875f5cb50b0ea75433311869e930b", mnemonic: "gravity machine north sort system female filter attitude volume fold club stay feature office ecology stable narrow fog", seed: "628c3827a8823298ee685db84f55caa34b5cc195a778e52d45f59bcf75aba68e4d7590e101dc414bc1bbd5737666fbbef35d1f1903953b66624f910feef245ac", }, Test { entropy: "68a79eaca2324873eacc50cb9c6eca8cc68ea5d936f98787c60c7ebc74e6ce7c", mnemonic: "hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length", seed: "64c87cde7e12ecf6704ab95bb1408bef047c22db4cc7491c4271d170a1b213d20b385bc1588d9c7b38f1b39d415665b8a9030c9ec653d75e65f847d8fc1fc440", }, Test { entropy: "c0ba5a8e914111210f2bd131f3d5e08d", mnemonic: "scheme spot photo card baby mountain device kick cradle pact join borrow", seed: "ea725895aaae8d4c1cf682c1bfd2d358d52ed9f0f0591131b559e2724bb234fca05aa9c02c57407e04ee9dc3b454aa63fbff483a8b11de949624b9f1831a9612", }, Test { entropy: "6d9be1ee6ebd27a258115aad99b7317b9c8d28b6d76431c3", mnemonic: "horn tenant knee talent sponsor spell gate clip pulse soap slush warm silver 
nephew swap uncle crack brave", seed: "fd579828af3da1d32544ce4db5c73d53fc8acc4ddb1e3b251a31179cdb71e853c56d2fcb11aed39898ce6c34b10b5382772db8796e52837b54468aeb312cfc3d", }, Test { entropy: "9f6a2878b2520799a44ef18bc7df394e7061a224d2c33cd015b157d746869863", mnemonic: "panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside", seed: "72be8e052fc4919d2adf28d5306b5474b0069df35b02303de8c1729c9538dbb6fc2d731d5f832193cd9fb6aeecbc469594a70e3dd50811b5067f3b88b28c3e8d", }, Test { entropy: "23db8160a31d3e0dca3688ed941adbf3", mnemonic: "cat swing flag economy stadium alone churn speed unique patch report train", seed: "deb5f45449e615feff5640f2e49f933ff51895de3b4381832b3139941c57b59205a42480c52175b6efcffaa58a2503887c1e8b363a707256bdd2b587b46541f5", }, Test { entropy: "8197a4a47f0425faeaa69deebc05ca29c0a5b5cc76ceacc0", mnemonic: "light rule cinnamon wrap drastic word pride squirrel upgrade then income fatal apart sustain crack supply proud access", seed: "4cbdff1ca2db800fd61cae72a57475fdc6bab03e441fd63f96dabd1f183ef5b782925f00105f318309a7e9c3ea6967c7801e46c8a58082674c860a37b93eda02", }, Test { entropy: "066dca1a2bb7e8a1db2832148ce9933eea0f3ac9548d793112d9a95c9407efad", mnemonic: "all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform", seed: "26e975ec644423f4a4c4f4215ef09b4bd7ef924e85d1d17c4cf3f136c2863cf6df0a475045652c57eb5fb41513ca2a2d67722b77e954b4b3fc11f7590449191d", }, Test { entropy: "f30f8c1da665478f49b001d94c5fc452", mnemonic: "vessel ladder alter error federal sibling chat ability sun glass valve picture", seed: "2aaa9242daafcee6aa9d7269f17d4efe271e1b9a529178d7dc139cd18747090bf9d60295d0ce74309a78852a9caadf0af48aae1c6253839624076224374bc63f",
Test { entropy: "c10ec20dc3cd9f652c7fac2f1230f7a3c828389a14392f05", mnemonic: "scissors invite lock maple supreme raw rapid void congress muscle digital elegant little brisk hair mango congress clump", seed: "7b4a10be9d98e6cba265566db7f136718e1398c71cb581e1b2f464cac1ceedf4f3e274dc270003c670ad8d02c4558b2f8e39edea2775c9e232c7cb798b069e88", }, Test { entropy: "f585c11aec520db57dd353c69554b21a89b20fb0650966fa0a9d6f74fd989d8f", mnemonic: "void come effort suffer camp survey warrior heavy shoot primary clutch crush open amazing screen patrol group space point ten exist slush involve unfold", seed: "01f5bced59dec48e362f2c45b5de68b9fd6c92c6634f44d6d40aab69056506f0e35524a518034ddc1192e1dacd32c1ed3eaa3c3b131c88ed8e7e54c49a5d0998", } ] } #[test] fn test_bip39() { let tests = tests(); for t in tests.iter() { assert_eq!( to_hex(to_seed(t.mnemonic, "TREZOR").unwrap().to_vec()), t.seed.to_string() ); assert_eq!( to_entropy(t.mnemonic).unwrap().to_vec(), from_hex(t.entropy.to_string()).unwrap() ); assert_eq!( from_entropy(&from_hex(t.entropy.to_string()).unwrap()).unwrap(), t.mnemonic ); } } #[test] fn test_bip39_random() { let sizes: [usize; 5] = [16, 20, 24, 28, 32]; let mut rng = thread_rng(); let size = *rng.choose(&sizes).unwrap(); let mut entropy: Vec<u8> = Vec::with_capacity(size); for _ in 0..size { let val: u8 = rng.gen(); entropy.push(val); } assert_eq!( entropy, to_entropy(&from_entropy(&entropy).unwrap()).unwrap() ) } #[test] fn test_invalid() { // Invalid words assert!(to_entropy("this is not a love song this is not a love song").is_err()); assert!(to_entropy("abandon abandon badword abandon abandon abandon abandon abandon abandon abandon abandon abandon").is_err()); // Invalid length assert!(to_entropy("abandon abandon abandon abandon abandon abandon").is_err()); assert!(from_entropy(&vec![1, 2, 3, 4, 5]).is_err()); assert!(from_entropy(&vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]).is_err()); // Invalid checksum assert!(to_entropy("abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon").is_err()); assert!(to_entropy("zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo").is_err()); assert!(to_entropy("scissors invite lock maple supreme raw rapid void congress muscle digital elegant little brisk hair mango congress abandon").is_err()); } }
},
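// An illustrative extra sketch (not part of the original test suite): generating a
// fresh 24-word mnemonic from random entropy and deriving its seed. It assumes the
// same `from_entropy`/`to_seed` signatures and old-style `rand` API the tests above use.
#[test]
fn test_generate_fresh_mnemonic() {
    let mut rng = thread_rng();
    // 32 bytes of entropy yields a 24-word mnemonic, as in the fixed vectors above.
    let mut entropy = vec![0u8; 32];
    for byte in entropy.iter_mut() {
        *byte = rng.gen();
    }
    let mnemonic = from_entropy(&entropy).expect("32 bytes is a valid entropy length");
    // An empty passphrase is allowed; "TREZOR" is only used by the shared test vectors.
    let seed = to_seed(&mnemonic, "").expect("a freshly generated mnemonic must round-trip");
    assert_eq!(seed.to_vec().len(), 64);
}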
data.ts
/*
  Generated class for the CardsProvider provider.

  See https://angular.io/guide/dependency-injection for more info on providers
  and Angular DI.
*/
@Injectable()
export class CardsProvider {
  cards: Card[];

  constructor(public http: HttpClient) {
    // Seed the provider with placeholder cards.
    this.cards = [new Card('Lorem ipsum dolor sit amet', 'consectetur adipiscing elit'),
    new Card('sed do eiusmod tempor incididunt ut labore', 'dolore magna aliqua'),
    new Card('Ut enim ad minim veniam', 'quis nostrud exercitation'),
    new Card('ullamco laboris nisi ut', 'ex ea commodo consequat'),
    new Card('Duis aute irure dolor', 'in reprehenderit in'),
    new Card('Excepteur sint occaecat', 'cupidatat non proident'),
    new Card('Sed ut perspiciatis', 'eos qui ratione'),
    new Card('dolorem ipsum quia', 'Nemo enim ipsam'),
    new Card('voluptas sit aspernatur', 'ad minim'),
    new Card('error sit voluptatem', 'veritatis et'),
    new Card('dolor sit amet', 'Neque porro quisquam'),
    new Card('nostrud exercitation', 'non proident')];
  }

  // Return the initial, unfiltered card list.
  getInitCards(): Card[] {
    return this.cards;
  }

  // Case-insensitive substring match on the card name.
  filter(searchTerm: string): Card[] {
    return this.cards.filter(item =>
      item.name.toLowerCase().indexOf(searchTerm.toLowerCase()) > -1);
  }
}
import { HttpClient } from '@angular/common/http'; import { Injectable } from '@angular/core'; import { Card } from '../../model/Card';
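// A minimal usage sketch (not part of the original provider): a hypothetical page
// component injects CardsProvider and drives filter() from a search box. 'HomePage',
// 'home.html', and the extra Component import are illustrative assumptions only.
import { Component } from '@angular/core';

@Component({ selector: 'page-home', templateUrl: 'home.html' })
export class HomePage {
  cards: Card[];

  constructor(private cardsProvider: CardsProvider) {
    // Start from the full, unfiltered deck.
    this.cards = this.cardsProvider.getInitCards();
  }

  onSearch(term: string) {
    // An empty term restores the initial list; otherwise filter by card name.
    this.cards = term ? this.cardsProvider.filter(term) : this.cardsProvider.getInitCards();
  }
}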
backend.rs
// Copyright 2017-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! State machine backends. These manage the code and storage of contracts. use std::{error, fmt}; use std::cmp::Ord; use std::collections::HashMap; use std::marker::PhantomData; use log::warn; use hash_db::Hasher; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::TrieBackendStorage; use trie::{TrieMut, MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration}; use trie::trie_types::{TrieDBMut, Layout}; /// A state backend is used to read state data and can have changes committed /// to it. /// /// The clone operation (if implemented) should be cheap. pub trait Backend<H: Hasher> { /// An error type when fetching data is not possible. type Error: super::Error; /// Storage changes to be applied if committing type Transaction: Consolidate + Default; /// Type of trie backend storage. type TrieBackendStorage: TrieBackendStorage<H>; /// Get keyed storage or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>; /// Get keyed storage value hash or None if there is nothing associated. fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> { self.storage(key).map(|v| v.map(|v| H::hash(&v))) } /// Get keyed child storage or None if there is nothing associated. fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>; /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<H::Out>, Self::Error> { self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v))) } /// true if a key exists in storage. fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> { Ok(self.storage(key)?.is_some()) } /// true if a key exists in child storage. fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> { Ok(self.child_storage(storage_key, key)?.is_some()) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F); /// Retrieve all entries keys of which start with the given prefix and /// call `f` for each of those keys. fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F); /// Calculate the storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. /// Does not include child storage updates. fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. 
The second argument /// is true if child storage root equals default storage root. fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord; /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>; /// Get all keys with given prefix fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> { let mut all = Vec::new(); self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec())); all } /// Get all keys of child storage with given prefix fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> { let mut all = Vec::new(); self.for_keys_in_child_storage(child_storage_key, |k| { if k.starts_with(prefix) { all.push(k.to_vec()); } }); all } /// Try convert into trie backend. fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>; /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. fn full_storage_root<I1, I2i, I2>( &self, delta: I1, child_deltas: I2) -> (H::Out, Self::Transaction) where I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, I2: IntoIterator<Item=(Vec<u8>, I2i)>, <H as Hasher>::Out: Ord, { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (storage_key, child_delta) in child_deltas { let (child_root, empty, child_txs) = self.child_storage_root(&storage_key[..], child_delta); txs.consolidate(child_txs); if empty { child_roots.push((storage_key, None)); } else { child_roots.push((storage_key, Some(child_root))); } } let (root, parent_txs) = self.storage_root( delta.into_iter().chain(child_roots.into_iter()) ); txs.consolidate(parent_txs); (root, txs) } } /// Trait that allows consolidate two transactions together. pub trait Consolidate { /// Consolidate two transactions into one. fn consolidate(&mut self, other: Self); } impl Consolidate for () { fn consolidate(&mut self, _: Self) { () } } impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } } impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> { fn consolidate(&mut self, other: Self) { trie::GenericMemoryDB::consolidate(self, other) } } /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 #[derive(Debug)] pub enum Void {} impl fmt::Display for Void { fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl error::Error for Void { fn description(&self) -> &str { "unreachable error" } } /// In-memory backend. Fully recomputes tries on each commit but useful for /// tests. 
pub struct InMemory<H: Hasher> { inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>, trie: Option<TrieBackend<MemoryDB<H>, H>>, _hasher: PhantomData<H>, } impl<H: Hasher> Default for InMemory<H> { fn default() -> Self { InMemory { inner: Default::default(), trie: None, _hasher: PhantomData, } } } impl<H: Hasher> Clone for InMemory<H> { fn clone(&self) -> Self { InMemory { inner: self.inner.clone(), trie: None, _hasher: PhantomData, } } } impl<H: Hasher> PartialEq for InMemory<H> { fn eq(&self, other: &Self) -> bool { self.inner.eq(&other.inner) } } impl<H: Hasher> InMemory<H> { /// Copy the state, with applied updates pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self { let mut inner: HashMap<_, _> = self.inner.clone(); for (storage_key, key, val) in changes { match val { Some(v) => { inner.entry(storage_key).or_default().insert(key, v); }, None => { inner.entry(storage_key).or_default().remove(&key); }, } } inner.into() } } impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> { fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self { InMemory { inner: inner, trie: None, _hasher: PhantomData, } } } impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> { fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self { let mut expanded = HashMap::new(); expanded.insert(None, inner); InMemory { inner: expanded, trie: None, _hasher: PhantomData, } } } impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> { fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self { let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new(); for (child_key, key, value) in inner { if let Some(value) = value { expanded.entry(child_key).or_default().insert(key, value); } } expanded.into() } } impl super::Error for Void {} impl<H: Hasher> InMemory<H> { /// child storage key iterator pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> { self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..])) } } impl<H: Hasher> Backend<H> for InMemory<H> { type Error = Void; type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>; type TrieBackendStorage = MemoryDB<H>; fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> { Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone))) } fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> { Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false)) } fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) { self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) { self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k))); } fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, <H as Hasher>::Out: Ord, { let existing_pairs = self.inner.get(&None) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); let root = Layout::<H>::trie_root(existing_pairs.chain(transaction.iter().cloned()) .collect::<HashMap<_, _>>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect(); (root, full_transaction) } fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord { let storage_key = storage_key.to_vec(); let existing_pairs = self.inner.get(&Some(storage_key.clone())) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); let root = child_trie_root::<Layout<H>, _, _, _>( &storage_key, existing_pairs.chain(transaction.iter().cloned()) .collect::<HashMap<_, _>>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect(); let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key); (root, is_default, full_transaction) } fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> { self.inner.get(&None) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) .collect() } fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> { self.inner.get(&None) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() } fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> { self.inner.get(&Some(storage_key.to_vec())) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() } fn as_trie_backend(&mut self)-> Option<&TrieBackend<Self::TrieBackendStorage, H>> { let mut mdb = MemoryDB::default(); let mut root = None; let mut new_child_roots = Vec::new(); let mut root_map = None; for (storage_key, map) in &self.inner { if let Some(storage_key) = storage_key.as_ref() { let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?; new_child_roots.push((storage_key.clone(), ch.as_ref().into())); } else { root_map = Some(map); } } // root handling if let Some(map) = root_map.take() { root = Some(insert_into_memory_db::<H, _>( &mut mdb, map.clone().into_iter().chain(new_child_roots.into_iter()) )?); } let root = match root { Some(root) => root, None => insert_into_memory_db::<H, _>(&mut mdb, ::std::iter::empty())?, }; self.trie = Some(TrieBackend::new(mdb, root)); self.trie.as_ref() } 
} /// Insert input pairs into memory db. pub(crate) fn insert_into_memory_db<H, I>(mdb: &mut MemoryDB<H>, input: I) -> Option<H::Out> where H: Hasher, I: IntoIterator<Item=(Vec<u8>, Vec<u8>)>, { let mut root = <H as Hasher>::Out::default(); { let mut trie = TrieDBMut::<H>::new(mdb, &mut root); for (key, value) in input { if let Err(e) = trie.insert(&key, &value) { warn!(target: "trie", "Failed to write to trie: {}", e); return None; } } } Some(root) }
{ Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) }
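// A minimal sketch (not part of the original file) of driving `full_storage_root`
// on the `InMemory` backend defined above: child deltas are folded into child roots
// first, and those roots are then written into the top trie alongside the top-level
// delta. `Blake2Hasher` from the `primitives` crate is an assumption borrowed from
// Substrate's own tests; the child storage key is likewise illustrative.
#[cfg(test)]
mod full_storage_root_example {
    use super::*;
    use primitives::Blake2Hasher; // assumption: the hasher used elsewhere in tests

    #[test]
    fn child_roots_are_folded_into_the_top_trie() {
        let backend = InMemory::<Blake2Hasher>::default();
        let top_delta = vec![(b"key".to_vec(), Some(b"value".to_vec()))];
        // One child trie, keyed by its storage key, with its own delta.
        let child_deltas = vec![(
            b":child_storage:default:sub".to_vec(),
            vec![(b"k".to_vec(), Some(b"v".to_vec()))],
        )];

        // Child roots are computed first, then written into the top trie
        // together with the top-level delta (see `full_storage_root` above).
        let (_root, tx) = backend.full_storage_root(top_delta, child_deltas);

        // Applying the transaction yields a backend holding both entries.
        let updated = backend.update(tx);
        assert_eq!(updated.storage(b"key").unwrap(), Some(b"value".to_vec()));
        assert_eq!(
            updated.child_storage(b":child_storage:default:sub", b"k").unwrap(),
            Some(b"v".to_vec())
        );
    }
}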
data.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Idiota object types

tree         - a tree (directory listing) object that represents the directory
               structure of a commit.
commit (ref) - an object that represents the changes in a single commit.
blob         - a blob object that represents a file or a piece of data.
parent       - an object that represents the ancestor of a commit in the DAG.
tag          - an object that carries meta information (a named reference).
"""
__author__ = "prakashsellathurai"
__copyright__ = "Copyright 2021"
__version__ = "1.0.1"
__email__ = "[email protected]"

import os
import hashlib
import shutil
import json
from collections import namedtuple
from contextlib import contextmanager

GIT_DIR = None

RefValue = namedtuple('RefValue', ['symbolic', 'value'])


@contextmanager
    Args:
        new_dir (str): new git directory
    Yields:
        str: old git directory
    """
    global GIT_DIR
    old_dir = GIT_DIR
    GIT_DIR = f'{new_dir}/.idiota'
    yield
    GIT_DIR = old_dir


def init() -> None:
    """Create the .idiota directory.

    Returns:
        None
    """
    os.makedirs(GIT_DIR, exist_ok=True)
    os.makedirs(f'{GIT_DIR}/objects')


def update_ref(ref, value, deref: bool = True) -> None:
    """Update a ref.

    Args:
        ref (str): ref name
        value (RefValue): ref value
        deref (bool): dereference symbolic refs
    Returns:
        None
    """
    # TODO: check if ref exists
    # TODO: check if value is valid
    # TODO: check if ref is symbolic
    ref = _get_ref_internal(ref, deref)[0]

    assert value.value
    if value.symbolic:
        value = f'ref: {value.value}'
    else:
        value = value.value

    ref_path = f'{GIT_DIR}/{ref}'
    os.makedirs(os.path.dirname(ref_path), exist_ok=True)
    with open(ref_path, 'w') as f:
        f.write(value)


def get_ref(ref, deref=True) -> RefValue:
    """Get a ref value.

    Args:
        ref (str): ref name
        deref (bool): dereference symbolic refs
    Returns:
        RefValue: ref value
    """
    return _get_ref_internal(ref, deref)[1]


def delete_ref(ref, deref=True) -> None:
    """Delete a ref."""
    ref = _get_ref_internal(ref, deref)[0]
    os.remove(f'{GIT_DIR}/{ref}')


def _get_ref_internal(ref, deref):
    """Resolve a ref.

    Args:
        ref (str): ref name
        deref (bool): dereference symbolic refs
    Returns:
        (str, RefValue): resolved ref name and its value
    """
    ref_path = f'{GIT_DIR}/{ref}'
    value = None
    if os.path.isfile(ref_path):
        with open(ref_path) as f:
            value = f.read().strip()

    symbolic = bool(value) and value.startswith('ref:')
    if symbolic:
        value = value.split(':', 1)[1].strip()
        if deref:
            return _get_ref_internal(value, deref=True)

    return ref, RefValue(symbolic=symbolic, value=value)


def iter_refs(prefix='', deref=True):
    """Iterate over refs.

    Args:
        prefix (str): ref prefix
        deref (bool): dereference symbolic refs
    Yields:
        (str, RefValue): ref name and ref value
    """
    refs = ['HEAD', 'MERGE_HEAD']
    for root, _, filenames in os.walk(f'{GIT_DIR}/refs/'):
        root = os.path.relpath(root, GIT_DIR)
        refs.extend(f'{root}/{name}' for name in filenames)

    for refname in refs:
        if not refname.startswith(prefix):
            continue
        ref = get_ref(refname, deref=deref)
        if ref.value:
            yield refname, ref


@contextmanager
def get_index():
    """Load the index, yield it, and write it back on exit.

    Yields:
        dict: index
    """
    index = {}
    if os.path.isfile(f'{GIT_DIR}/index'):
        with open(f'{GIT_DIR}/index') as f:
            index = json.load(f)

    yield index

    with open(f'{GIT_DIR}/index', 'w') as f:
        json.dump(index, f)


def hash_object(data: bytes, type_='blob') -> str:
    """Hash an object (SHA-1) and store it in the object database.

    Args:
        data (bytes): object data
    Returns:
        str: object id
    """
    obj = type_.encode() + b'\x00' + data
    oid = hashlib.sha1(obj).hexdigest()
    with open(f'{GIT_DIR}/objects/{oid}', 'wb') as out:
        out.write(obj)
    return oid


def get_object(oid: str, expected='blob') -> bytes:
    """Get an object's content by id.

    Args:
        oid (str): object id
    Returns:
        bytes: object data
    """
    with open(f'{GIT_DIR}/objects/{oid}', 'rb') as f:
        obj = f.read()

    first_null = obj.index(b'\x00')
    type_ = obj[:first_null].decode()
    content = obj[first_null + 1:]

    if expected is not None:
        assert type_ == expected, f'Expected {expected}, got {type_}'
    return content


def object_exists(oid: str) -> bool:
    """Check whether an object with the given id exists in the repository.

    Args:
        oid (str): object id
    Returns:
        bool: True if the object exists
    """
    return os.path.isfile(f'{GIT_DIR}/objects/{oid}')


def fetch_object_if_missing(oid, remote_git_dir):
    """Fetch an object from a remote repository if it is not present locally.

    Args:
        oid (str): object id
        remote_git_dir (str): remote git directory
    Returns:
        None
""" if object_exists(oid): return remote_git_dir += '/.ugit' shutil.copy(f'{remote_git_dir}/objects/{oid}', f'{GIT_DIR}/objects/{oid}') def push_object(oid, remote_git_dir): """ push object to remote repository Args: oid (str): object id remote_git_dir (str): remote git directory Returns: None """ remote_git_dir += '/.ugit' shutil.copy(f'{GIT_DIR}/objects/{oid}', f'{remote_git_dir}/objects/{oid}')
def change_git_dir(new_dir) -> None:
    """Change the current git directory.
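# A minimal round-trip sketch (not part of the original module), assuming a fresh
# working directory: it exercises the `type + b'\x00' + payload` object layout
# that hash_object/get_object implement above.
if __name__ == '__main__':
    with change_git_dir('.'):
        init()  # creates ./.idiota and ./.idiota/objects (fails if re-run)
        oid = hash_object(b'hello world')  # SHA-1 of b'blob\x00hello world'
        assert get_object(oid, expected='blob') == b'hello world'
        assert object_exists(oid)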
server.py
import jwt from flask import render_template, Flask, request, session, send_file import secrets from datetime import datetime import io from jwt import PyJWTError from werkzeug.exceptions import BadRequest from werkzeug.utils import redirect import pandas as pd
from microsetta_admin import metadata_util, upload_util from microsetta_admin.config_manager import SERVER_CONFIG from microsetta_admin._api import APIRequest import importlib.resources as pkg_resources TOKEN_KEY_NAME = 'token' SEND_EMAIL_CHECKBOX_DEFAULT_NAME = 'send_email' PUB_KEY = pkg_resources.read_text( 'microsetta_admin', "authrocket.pubkey") DUMMY_SELECT_TEXT = '-------' RECEIVED_TYPE_DROPDOWN = \ [DUMMY_SELECT_TEXT, "Blood (skin prick)", "Saliva", "Stool", "Sample Type Unclear (Swabs Included)"] VALID_STATUS = "sample-is-valid" NO_SOURCE_STATUS = "no-associated-source" NO_ACCOUNT_STATUS = "no-registered-account" NO_COLLECTION_INFO_STATUS = "no-collection-info" INCONSISTENT_SAMPLE_STATUS = "sample-has-inconsistencies" UNKNOWN_VALIDITY_STATUS = "received-unknown-validity" STATUS_OPTIONS = [DUMMY_SELECT_TEXT, VALID_STATUS, NO_SOURCE_STATUS, NO_ACCOUNT_STATUS, NO_COLLECTION_INFO_STATUS, INCONSISTENT_SAMPLE_STATUS, UNKNOWN_VALIDITY_STATUS] API_PROJECTS_URL = '/api/admin/projects' def handle_pyjwt(pyjwt_error): # PyJWTError (Aka, anything wrong with token) will force user to log out # and log in again return redirect('/logout') def parse_jwt(token): """ Raises ------ jwt.PyJWTError If the token is invalid """ decoded = jwt.decode(token, PUB_KEY, algorithms=['RS256'], verify=True) return decoded def build_login_variables(): # Anything that renders sitebase.html must pass down these variables to # jinja2 token_info = None if TOKEN_KEY_NAME in session: # If user leaves the page open, the token can expire before the # session, so if our token goes back we need to force them to login # again. token_info = parse_jwt(session[TOKEN_KEY_NAME]) vars = { 'endpoint': SERVER_CONFIG["endpoint"], 'ui_endpoint': SERVER_CONFIG["ui_endpoint"], 'authrocket_url': SERVER_CONFIG["authrocket_url"] } if token_info is not None: vars['email'] = token_info['email'] return vars def build_app(): # Create the application instance app = Flask(__name__) flask_secret = SERVER_CONFIG["FLASK_SECRET_KEY"] if flask_secret is None: print("WARNING: FLASK_SECRET_KEY must be set to run with gUnicorn") flask_secret = secrets.token_urlsafe(16) app.secret_key = flask_secret app.config['SESSION_TYPE'] = 'memcached' app.config['SESSION_COOKIE_NAME'] = 'session-microsetta-admin' # Set mapping from exception type to response code app.register_error_handler(PyJWTError, handle_pyjwt) return app app = build_app() @app.context_processor def utility_processor(): def format_timestamp(timestamp_str): if not timestamp_str: return "None" datetime_obj = datetime.fromisoformat(timestamp_str) return datetime_obj.strftime("%Y %B %d %H:%M:%S") return dict(format_timestamp=format_timestamp) @app.route('/') def home(): return render_template('sitebase.html', **build_login_variables()) @app.route('/search', methods=['GET']) def search(): return _search() @app.route('/search/sample', methods=['GET', 'POST']) def search_sample(): return _search('samples') @app.route('/search/kit', methods=['GET', 'POST']) def search_kit(): return _search('kit') @app.route('/search/email', methods=['GET', 'POST']) def search_email(): return _search('account') def _search(resource=None): if request.method == 'GET': return render_template('search.html', **build_login_variables()) elif request.method == 'POST': query = request.form['search_%s' % resource] status, result = APIRequest.get( '/api/admin/search/%s/%s' % (resource, query)) if status == 404: result = {'error_message': "Query not found"} return render_template('search_result.html', **build_login_variables(), 
result=result), 200 elif status == 200: return render_template('search_result.html', **build_login_variables(), resource=resource, result=result), 200 else: return result def _translate_nones(a_dict, do_none_to_str): # Note: this ISN'T a deep copy. This function is NOT set up # for recursing through a multi-layer dictionary result = a_dict.copy() for k, v in result.items(): if do_none_to_str and v is None: result[k] = "" elif not do_none_to_str and v == '': result[k] = None return result def _get_projects(include_stats, is_active): projects_uri = API_PROJECTS_URL + f"?include_stats={include_stats}" if is_active is not None: projects_uri += f"&is_active={is_active}" status, projects_output = APIRequest.get(projects_uri) if status >= 400: result = {'error_message': f"Unable to load project list: " f"{projects_uri}"} else: cleaned_projects = [_translate_nones(x, True) for x in projects_output] # if we're not using full project stats, sort # alphabetically by project name if not include_stats: cleaned_projects = sorted(cleaned_projects, key=lambda k: k['project_name']) result = {'projects': cleaned_projects} return status, result @app.route('/manage_projects', methods=['GET', 'POST']) def manage_projects(): result = None is_active = request.args.get('is_active', None) if request.method == 'POST': model = {x: request.form[x] for x in request.form} project_id = model.pop('project_id') model['is_microsetta'] = model.get('is_microsetta', '') == 'true' model['bank_samples'] = model.get('bank_samples', '') == 'true' model = _translate_nones(model, False) if project_id.isdigit(): # update (put) an existing project action = "update" status, api_output = APIRequest.put( '{}/{}'.format(API_PROJECTS_URL, project_id), json=model) else: # create (post) a new project action = "create" status, api_output = APIRequest.post( API_PROJECTS_URL, json=model) # if api post or put failed if status >= 400: result = {'error_message': f'Unable to {action} project.'} # end if post # if the above work (if any) didn't produce an error message, return # the projects list if result is None: _, result = _get_projects(include_stats=True, is_active=is_active) return render_template('manage_projects.html', **build_login_variables(), result=result), 200 @app.route('/email_stats', methods=['GET', 'POST']) def email_stats(): _, result = _get_projects(include_stats=False, is_active=True) projects = result.get('projects') if request.method == 'GET': project = request.args.get('project', None) email = request.args.get('email') if email is None: # They want to search for emails, show them the search dialog return render_template("email_stats_pulldown.html", **build_login_variables(), resource=None, search_error=None, projects=projects) emails = [email, ] elif request.method == 'POST': project = request.form.get('project', None) emails, upload_err = upload_util.parse_request_csv_col( request, 'file', 'email' ) if upload_err is not None: return render_template('email_stats_pulldown.html', **build_login_variables(), resource=None, search_error=[{'error': upload_err}], projects=projects) else: raise BadRequest() if project == "": project = None # de-duplicate emails = list({e.lower() for e in emails}) status, result = APIRequest.post( '/api/admin/account_email_summary', json={ "emails": emails, "project": project }) if status != 200: return render_template('email_stats_pulldown.html', search_error=[{'error': result}], resource=None, **build_login_variables(), projects=projects) # At a minimum, our table will display these columns. 
# We may show additional info depending on what comes back from the request base_data_template = { 'email': 'XXX', 'summary': 'XXX', 'account_id': 'XXX', 'creation_time': 'XXX', 'kit_name': 'XXX', 'project': 'XXX', 'unclaimed-samples-in-kit': 0, 'never-scanned': 0, 'sample-is-valid': 0, 'no-associated-source': 0, 'no-registered-account': 0, 'no-collection-info': 0, 'sample-has-inconsistencies': 0, 'received-unknown-validity': 0 } df = pd.DataFrame([base_data_template] + result) df = df.drop(0) # remove the template row numeric_cols = [ "unclaimed-samples-in-kit", "never-scanned", "sample-is-valid", "no-associated-source", "no-registered-account", "no-collection-info", "sample-has-inconsistencies", "received-unknown-validity" ] df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric) df[numeric_cols] = df[numeric_cols].fillna(0) def urlify_account_id(id_): if pd.isnull(id_): return "No associated account" else: ui_endpoint = SERVER_CONFIG['ui_endpoint'] account_url = f"{ui_endpoint}/accounts/{id_}" return f'<a target="_blank" href="{account_url}">{id_}</a>' # see https://stackoverflow.com/questions/20035518/insert-a-link-inside-a-pandas-table # noqa df['account_id'] = df["account_id"].apply(urlify_account_id) return render_template("email_stats_pulldown.html", search_error=None, resource=df, **build_login_variables(), projects=projects) @app.route('/per_sample_summary', methods=['GET', 'POST']) def per_sample_summary(): # get a list of all projects in the system _, result = _get_projects(include_stats=False, is_active=True) projects = result.get('projects') # filter out any projects that don't belong to Microsetta projects = [x for x in projects if x['is_microsetta'] is True] # build a list of dictionaries with just the project id and the project # name. projects = [{'project_name': x['project_name'], 'project_id': x['project_id']} for x in projects] # determine if user wants sample ids stripped strip_sampleid = request.form.get('strip_sampleid', 'off') strip_sampleid = strip_sampleid.lower() == 'on' if request.method == 'GET': # If user arrived via GET then they are either here w/out # querying and they simply need the default webpage, or they are # querying with either a list of barcodes, or with a project id. # look for both parameters to determine which state we are in. sample_barcode = request.args.get('sample_barcode') project_id = request.args.get('project_id') if sample_barcode is None and project_id is None: # user just wants the default page. return render_template('per_sample_summary.html', resource=None, projects=projects, **build_login_variables()) if project_id is not None: # user wants to get summaries on all samples in a project. payload = {'project_id': project_id} status, result = APIRequest.post('/api/admin/account_barcode_summa' 'ry?strip_sampleid=False', json=payload) if status == 200: if result['partial_result'] is True: unprocessed_barcodes = result['unprocessed_barcodes'] else: unprocessed_barcodes = None resource = pd.DataFrame(result['samples']) order = ['sampleid', 'project', 'account-email', 'source-email', 'source-type', 'site-sampled', 'sample-status', 'sample-received', 'ffq-taken', 'ffq-complete', 'vioscreen_username'] order.extend(sorted(set(resource.columns) - set(order))) resource = resource[order] if unprocessed_barcodes: return render_template('per_sample_summary.html', resource=resource, projects=projects, error_message="Too many barcodes. 
S" "erver processed only" " the first 1000.", **build_login_variables()) else: return render_template('per_sample_summary.html', resource=resource, projects=projects, **build_login_variables()) else: return render_template('per_sample_summary.html', resource=None, projects=projects, error_message=result, **build_login_variables()) # if we are here then the user is querying using barcodes and we # simply need to set up the query below to perform. sample_barcodes = [sample_barcode, ] else: # assume POST, since there are only two methods defined in route. # if we are here, it is because the user is querying using an uploaded # file containing sample names. sample_barcodes, err = upload_util.parse_request_csv_col(request, 'file', 'sample_name') if err is not None: # there was an error. abort early. return render_template('per_sample_summary.html', resource=None, projects=projects, **build_login_variables(), search_error=[{'error': err}]) # perform the main query. payload = {'sample_barcodes': sample_barcodes} status, result = APIRequest.post('/api/admin/account_barcode_summary?stri' 'p_sampleid=%s' % str(strip_sampleid), json=payload) if status == 200: if result['partial_result'] is True: unprocessed_barcodes = result['unprocessed_barcodes'] else: unprocessed_barcodes = None resource = pd.DataFrame(result['samples']) order = ['sampleid', 'project', 'account-email', 'source-email', 'source-type', 'site-sampled', 'sample-status', 'sample-received', 'ffq-taken', 'ffq-complete', 'vioscreen_username'] order.extend(sorted(set(resource.columns) - set(order))) resource = resource[order] if unprocessed_barcodes: return render_template('per_sample_summary.html', resource=resource, projects=projects, error_message="Too many barcodes. S" "erver processed only" " the first 1000.", **build_login_variables()) else: return render_template('per_sample_summary.html', resource=resource, projects=projects, **build_login_variables()) else: return render_template('per_sample_summary.html', resource=None, projects=projects, error_message=result, **build_login_variables()) def _get_by_sample_barcode(sample_barcodes, strip_sampleid, projects): payload = {'sample_barcodes': sample_barcodes} status, result = APIRequest.post('/api/admin/account_barcode_summary?' 'strip_sampleid=%s' % str(strip_sampleid), json=payload) if status == 200: if result['partial_result'] is True: unprocessed_barcodes = result['unprocessed_barcodes'] else: unprocessed_barcodes = None resource = pd.DataFrame(result['samples']) order = ['sampleid', 'project', 'account-email', 'source-email', 'source-type', 'site-sampled', 'sample-status', 'sample-received', 'ffq-taken', 'ffq-complete', 'vioscreen_username'] order.extend(sorted(set(resource.columns) - set(order))) resource = resource[order] if unprocessed_barcodes: return render_template('per_sample_summary.html', resource=resource, projects=projects, error_message="Too many barcodes. 
S" "erver processed only" " the first 1000.", **build_login_variables()) else: return render_template('per_sample_summary.html', resource=resource, projects=projects, **build_login_variables()) else: return render_template('per_sample_summary.html', resource=None, projects=projects, error_message=result, **build_login_variables()) @app.route('/create_kits', methods=['GET', 'POST']) def new_kits(): _, result = _get_projects(include_stats=False, is_active=True) projects = result.get('projects') if request.method == 'GET': return render_template('create_kits.html', error_message=result.get('error_message'), projects=projects, **build_login_variables()) elif request.method == 'POST': num_kits = int(request.form['num_kits']) num_samples = int(request.form['num_samples']) prefix = request.form['prefix'] selected_project_ids = request.form.getlist('project_ids') payload = {'number_of_kits': num_kits, 'number_of_samples': num_samples, 'project_ids': selected_project_ids} if prefix: payload['kit_id_prefix'] = prefix status, result = APIRequest.post( '/api/admin/create/kits', json=payload) if status != 201: return render_template('create_kits.html', error_message='Failed to create kits', projects=projects, **build_login_variables()) # StringIO/BytesIO based off https://stackoverflow.com/a/45111660 buf = io.StringIO() payload = io.BytesIO() # explicitly expand out the barcode detail kits = pd.DataFrame(result['created']) for i in range(num_samples): kits['barcode_%d' % (i+1)] = [r['sample_barcodes'][i] for _, r in kits.iterrows()] kits.drop(columns='sample_barcodes', inplace=True) kits.to_csv(buf, sep=',', index=False, header=True) payload.write(buf.getvalue().encode('utf-8')) payload.seek(0) buf.close() stamp = datetime.now().strftime('%d%b%Y-%H%M') fname = f'kits-{stamp}.csv' return send_file(payload, as_attachment=True, attachment_filename=fname, mimetype='text/csv') def _check_sample_status(extended_barcode_info): warning = None in_microsetta_project = any( [x['is_microsetta'] for x in extended_barcode_info['projects_info']]) # one warning to rule them all; check in order of precendence if not in_microsetta_project: warning = UNKNOWN_VALIDITY_STATUS elif extended_barcode_info['account'] is None: warning = NO_ACCOUNT_STATUS elif extended_barcode_info['source'] is None: warning = NO_SOURCE_STATUS # collection datetime is used as the bellwether for the whole # set of sample collection info because it is relevant to all # kinds of samples (whereas previously used field, sample site, is not # filled when environmental samples are returned). elif extended_barcode_info['sample'].get('datetime_collected') is None: warning = NO_COLLECTION_INFO_STATUS return warning # Set up handlers for the cases, # GET to view the page, # POST to update info for a barcode -AND (possibly)- # email end user about the change in sample status, def _scan_get(sample_barcode, update_error): # If there is no sample_barcode in the GET # they still need to enter one in the box, so show empty page if sample_barcode is None: return render_template('scan.html', **build_login_variables()) # Assuming there is a sample barcode, grab that sample's information status, result = APIRequest.get( '/api/admin/search/samples/%s' % sample_barcode) # If we successfully grab it, show the page to the user if status == 200: # Process result in python because its easier than jinja2. 
status_warning = _check_sample_status(result) # check the latest scan to find the default sample_status for form latest_status = DUMMY_SELECT_TEXT if result['latest_scan']: latest_status = result['latest_scan']['sample_status'] account = result.get('account') events = [] if account: event_status, event_result = APIRequest.get( '/api/admin/events/accounts/%s' % account['id'] ) if event_status != 200: raise Exception("Couldn't pull event history") events = event_result return render_template( 'scan.html', **build_login_variables(), barcode_info=result["barcode_info"], projects_info=result['projects_info'], scans_info=result['scans_info'], latest_status=latest_status, dummy_status=DUMMY_SELECT_TEXT, status_options=STATUS_OPTIONS, send_email=session.get(SEND_EMAIL_CHECKBOX_DEFAULT_NAME, True), sample_info=result['sample'], extended_info=result, status_warning=status_warning, update_error=update_error, received_type_dropdown=RECEIVED_TYPE_DROPDOWN, source=result['source'], events=events ) elif status == 401: # If we fail due to unauthorized, need the user to log in again return redirect('/logout') elif status == 404: # If we fail due to not found, need to tell the user to pick a diff # barcode return render_template( 'scan.html', **build_login_variables(), search_error="Barcode %s Not Found" % sample_barcode, update_error=update_error, received_type_dropdown=RECEIVED_TYPE_DROPDOWN ) else: raise BadRequest() def _scan_post_update_info(sample_barcode, technician_notes, sample_status, action, issue_type, template, received_type, recorded_type): ### # Bugfix Part 1 for duplicate emails being sent. Theory is that client is # out of sync due to hitting back button after a scan has changed # state. # Can't test if client is up to date without ETags, so for right now, # we just validate whether or not they should send an email, duplicating # the client log. (This can still break with multiple admin clients, # but that is unlikely at the moment.) latest_status = None # TODO: Replace this with ETags! status, result = APIRequest.get( '/api/admin/search/samples/%s' % sample_barcode) if result['latest_scan']: latest_status = result['latest_scan']['sample_status'] ### # Do the actual update status, response = APIRequest.post( '/api/admin/scan/%s' % sample_barcode, json={ "sample_status": sample_status, "technician_notes": technician_notes } ) # if the update failed, keep track of the error so it can be displayed if status != 201: update_error = response return _scan_get(sample_barcode, update_error) else: update_error = None # If we're not supposed to send an email, go back to GET if action != "send_email": return _scan_get(sample_barcode, update_error) ### # Bugfix Part 2 for duplicate emails being sent. if sample_status == latest_status: # This is what we'll hit if javascript thinks it's updating status # but is out of sync with the database. 
update_error = "Ignoring Send Email, sample_status would " \ "not have been updated (Displayed page was out of " \ "sync)" return _scan_get(sample_barcode, update_error) ### # This is what we'll hit if there are no email templates to send for # the new sample status (or if we screw up javascript side :D ) if template is None: update_error = "Cannot Send Email: No Issue Type Specified " \ "(or no issue types available)" return _scan_get(sample_barcode, update_error) # Otherwise, send out an email to the end user status, response = APIRequest.post( '/api/admin/email', json={ "issue_type": issue_type, "template": template, "template_args": { "sample_barcode": sample_barcode, "recorded_type": recorded_type, "received_type": received_type } } ) # if the email failed to send, keep track of the error # so it can be displayed if status != 200: update_error = response else: update_error = None return _scan_get(sample_barcode, update_error) @app.route('/scan', methods=['GET', 'POST']) def scan(): # Now that the handlers are set up, parse the request to determine what # to do. # If its a get, grab the sample_barcode from the query string rather than # form parameters if request.method == 'GET': sample_barcode = request.args.get('sample_barcode') return _scan_get(sample_barcode, None) # If its a post, make the changes, then refresh the page if request.method == 'POST': # Without some extra ajax, we can't persist the send_email checkbox # until they actually post the form send_email = request.form.get('send_email', False) session[SEND_EMAIL_CHECKBOX_DEFAULT_NAME] = send_email sample_barcode = request.form['sample_barcode'] technician_notes = request.form['technician_notes'] sample_status = request.form['sample_status'] action = request.form.get('action') issue_type = request.form.get('issue_type') template = request.form.get('template') received_type = request.form.get('received_type') recorded_type = request.form.get('recorded_type') return _scan_post_update_info(sample_barcode, technician_notes, sample_status, action, issue_type, template, received_type, recorded_type) @app.route('/metadata_pulldown', methods=['GET', 'POST']) def metadata_pulldown(): allow_missing = request.form.get('allow_missing_samples', False) if request.method == 'GET': sample_barcode = request.args.get('sample_barcode') # If there is no sample_barcode in the GET # they still need to enter one in the box, so show empty page if sample_barcode is None: return render_template('metadata_pulldown.html', **build_login_variables()) sample_barcodes = [sample_barcode] elif request.method == 'POST': sample_barcodes, upload_err = upload_util.parse_request_csv_col( request, 'file', 'sample_name' ) if upload_err is not None: return render_template('metadata_pulldown.html', **build_login_variables(), search_error=[{'error': upload_err}]) else: raise BadRequest() df, errors = metadata_util.retrieve_metadata(sample_barcodes) # Strangely, these api requests are returning an html error page rather # than a machine parseable json error response object with message. # This is almost certainly due to error handling for the cohosted minimal # client. In future, we should just pass down whatever the api says here. if len(errors) == 0 or allow_missing: df = metadata_util.drop_private_columns(df) # TODO: Streaming direct from pandas is a pain. Need to search for # better ways to iterate and chunk this file as we generate it strstream = io.StringIO() df.to_csv(strstream, sep='\t', index=True, header=True) # TODO: utf-8 or utf-16 encoding?? 
bytestream = io.BytesIO() bytestream.write(strstream.getvalue().encode('utf-8')) bytestream.seek(0) strstream.close() return send_file(bytestream, mimetype="text/tab-separated-values", as_attachment=True, attachment_filename="metadata_pulldown.tsv", add_etags=False, cache_timeout=None, conditional=False, last_modified=None, ) else: return render_template('metadata_pulldown.html', **build_login_variables(), info={'barcodes': sample_barcodes}, search_error=errors) @app.route('/submit_daklapack_order', methods=['GET']) def submit_daklapack_order(): error_msg_key = "error_message" def return_error(msg): return render_template('submit_daklapack_order.html', **build_login_variables(), error_message=msg) status, dak_articles_output = APIRequest.get( '/api/admin/daklapack_articles') if status >= 400: return return_error("Unable to load daklapack articles list.") status, projects_output = _get_projects(include_stats=False, is_active=True) if status >= 400: return return_error(projects_output[error_msg_key]) return render_template('submit_daklapack_order.html', **build_login_variables(), error_message=None, dummy_status=DUMMY_SELECT_TEXT, dak_articles=dak_articles_output, contact_phone_number=SERVER_CONFIG[ "order_contact_phone"], projects=projects_output['projects']) @app.route('/submit_daklapack_order', methods=['POST']) def post_submit_daklapack_order(): def return_error(msg): return render_template('submit_daklapack_order.html', **build_login_variables(), error_message=msg) error_message = success_submissions = failure_submissions = headers = None expected_headers = ["firstName", "lastName", "address1", "insertion", "address2", "postalCode", "city", "state", "country", "countryCode"] # get required fields; cast where expected by api phone_number = request.form['contact_phone_number'] project_ids_list = list(map(int, request.form.getlist('projects'))) dak_article_code = request.form['dak_article_code'] article_quantity = int(request.form['quantity']) file = request.files['addresses_file'] # get optional fields or defaults planned_send_str = request.form.get('planned_send_date') planned_send_date = planned_send_str if planned_send_str else None description = request.form.get('description') fedex_ref_1 = request.form.get('fedex_ref_1') fedex_ref_2 = request.form.get('fedex_ref_2') fedex_ref_3 = request.form.get('fedex_ref_3') try: # NB: import everything as a string so that zip codes beginning with # zero (e.g., 06710) don't get silently cast to numbers if file.filename.endswith('xls'): addresses_df = pd.read_excel(file, dtype=str) elif file.filename.endswith('xlsx'): addresses_df = pd.read_excel(file, engine='openpyxl', dtype=str) else: raise ValueError(f"Unrecognized extension on putative excel " f"filename: {file.filename}") headers = list(addresses_df.columns) except Exception as e: # noqa return return_error('Could not parse addresses file') if headers != expected_headers: return return_error(f"Received column names {headers} do " f"not match expected column names" f" {expected_headers}") # add (same) contact phone number to every address addresses_df['phone'] = phone_number addresses_df = addresses_df.fillna("") temp_dict = addresses_df.to_dict(orient='index') addresses_list = [temp_dict[n] for n in range(len(temp_dict))] status, post_output = APIRequest.post( '/api/admin/daklapack_orders', json={ "project_ids": project_ids_list, "article_code": dak_article_code, "quantity": article_quantity, "addresses": addresses_list, "planned_send_date": planned_send_date, "description": description, 
"fedex_ref_1": fedex_ref_1, "fedex_ref_2": fedex_ref_2, "fedex_ref_3": fedex_ref_3 } ) # if the post failed, keep track of the error so it can be displayed if status != 200: error_message = post_output else: order_submissions = post_output["order_submissions"] success_submissions = [x for x in order_submissions if x["order_success"]] failure_submissions = [x for x in order_submissions if not x["order_success"]] return render_template('submit_daklapack_order.html', **build_login_variables(), error_message=error_message, success_submissions=success_submissions, failure_submissions=failure_submissions) @app.route('/authrocket_callback') def authrocket_callback(): token = request.args.get('token') session[TOKEN_KEY_NAME] = token return redirect("/") @app.route('/logout') def logout(): if TOKEN_KEY_NAME in session: del session[TOKEN_KEY_NAME] return redirect("/") # If we're running in stand alone mode, run the application if __name__ == '__main__': if SERVER_CONFIG["ssl_cert_path"] and SERVER_CONFIG["ssl_key_path"]: ssl_context = ( SERVER_CONFIG["ssl_cert_path"], SERVER_CONFIG["ssl_key_path"] ) else: ssl_context = None app.run( port=SERVER_CONFIG['port'], debug=SERVER_CONFIG['debug'], ssl_context=ssl_context )
transaction.rs
use std::{ collections::HashMap, future::Future, iter::FromIterator, pin::Pin, sync::Arc, task::{Context, Poll}, }; use futures::{ stream::{FuturesUnordered, StreamExt}, FutureExt, }; use tower::{Service, ServiceExt}; use tracing::Instrument; use zebra_chain::{ block, orchard, parameters::{Network, NetworkUpgrade}, primitives::Groth16Proof, sapling, transaction::{self, HashType, SigHash, Transaction}, transparent, }; use zebra_script::CachedFfiTransaction; use zebra_state as zs; use crate::{error::TransactionError, primitives, script, BoxError}; mod check; #[cfg(test)] mod tests; /// Asynchronous transaction verification. /// /// # Correctness /// /// Transaction verification requests should be wrapped in a timeout, so that /// out-of-order and invalid requests do not hang indefinitely. See the [`chain`](`crate::chain`) /// module documentation for details. #[derive(Debug, Clone)] pub struct Verifier<ZS> { network: Network, script_verifier: script::Verifier<ZS>, } impl<ZS> Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { pub fn new(network: Network, script_verifier: script::Verifier<ZS>) -> Self { Self { network, script_verifier, } } } /// Specifies whether a transaction should be verified as part of a block or as /// part of the mempool. /// /// Transaction verification has slightly different consensus rules, depending on /// whether the transaction is to be included in a block on in the mempool. #[allow(dead_code)] pub enum Request { /// Verify the supplied transaction as part of a block. Block { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, transparent::OrderedUtxo>>, /// The height of the block containing this transaction. height: block::Height, }, /// Verify the supplied transaction as part of the mempool. /// /// Mempool transactions do not have any additional UTXOs. /// /// Note: coinbase transactions are invalid in the mempool Mempool { /// The transaction itself. transaction: Arc<Transaction>, /// The height of the next block. /// /// The next block is the first block that could possibly contain a /// mempool transaction. height: block::Height, }, } impl Request { /// The transaction to verify that's in this request. pub fn transaction(&self) -> Arc<Transaction> { match self { Request::Block { transaction, .. } => transaction.clone(), Request::Mempool { transaction, .. } => transaction.clone(), } } /// The set of additional known unspent transaction outputs that's in this request. pub fn known_utxos(&self) -> Arc<HashMap<transparent::OutPoint, transparent::OrderedUtxo>> { match self { Request::Block { known_utxos, .. } => known_utxos.clone(), Request::Mempool { .. } => HashMap::new().into(), } } /// The height used to select the consensus rules for verifying this transaction. pub fn height(&self) -> block::Height { match self { Request::Block { height, .. } | Request::Mempool { height, .. } => *height, } } /// The network upgrade to consider for the verification. /// /// This is based on the block height from the request, and the supplied `network`. 
pub fn upgrade(&self, network: Network) -> NetworkUpgrade { NetworkUpgrade::current(network, self.height()) } } impl<ZS> Service<Request> for Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { type Response = transaction::Hash; type Error = TransactionError; type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } // TODO: break up each chunk into its own method fn call(&mut self, req: Request) -> Self::Future { let is_mempool = match req { Request::Block { .. } => false, Request::Mempool { .. } => true, }; if is_mempool { // XXX determine exactly which rules apply to mempool transactions unimplemented!("Zebra does not yet have a mempool (#2309)"); } let script_verifier = self.script_verifier.clone(); let network = self.network; let tx = req.transaction(); let span = tracing::debug_span!("tx", hash = %tx.hash()); async move { tracing::trace!(?tx); // Do basic checks first check::has_inputs_and_outputs(&tx)?; if tx.is_coinbase() { check::coinbase_tx_no_prevout_joinsplit_spend(&tx)?; } // [Canopy onward]: `vpub_old` MUST be zero. // https://zips.z.cash/protocol/protocol.pdf#joinsplitdesc check::disabled_add_to_sprout_pool(&tx, req.height(), network)?; // "The consensus rules applied to valueBalance, vShieldedOutput, and bindingSig // in non-coinbase transactions MUST also be applied to coinbase transactions." // // This rule is implicitly implemented during Sapling and Orchard verification, // because they do not distinguish between coinbase and non-coinbase transactions. // // Note: this rule originally applied to Sapling, but we assume it also applies to Orchard. // // https://zips.z.cash/zip-0213#specification let async_checks = match tx.as_ref() { Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. } => { tracing::debug!(?tx, "got transaction with wrong version"); return Err(TransactionError::WrongVersion); } Transaction::V4 { inputs, // outputs, // lock_time, // expiry_height, joinsplit_data, sapling_shielded_data, .. } => Self::verify_v4_transaction( req, network, script_verifier, inputs, joinsplit_data, sapling_shielded_data, )?, Transaction::V5 { inputs, sapling_shielded_data, orchard_shielded_data, .. } => Self::verify_v5_transaction( req, network, script_verifier, inputs, sapling_shielded_data, orchard_shielded_data, )?, }; async_checks.check().await?; Ok(tx.hash()) } .instrument(span) .boxed() } } impl<ZS> Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { /// Verify a V4 transaction. /// /// Returns a set of asynchronous checks that must all succeed for the transaction to be /// considered valid. 
These checks include: /// /// - transparent transfers /// - sprout shielded data /// - sapling shielded data /// /// The parameters of this method are: /// /// - the `request` to verify (that contains the transaction and other metadata, see [`Request`] /// for more information) /// - the `network` to consider when verifying /// - the `script_verifier` to use for verifying the transparent transfers /// - the transparent `inputs` in the transaction /// - the Sprout `joinsplit_data` shielded data in the transaction /// - the `sapling_shielded_data` in the transaction fn verify_v4_transaction( request: Request, network: Network, script_verifier: script::Verifier<ZS>, inputs: &[transparent::Input], joinsplit_data: &Option<transaction::JoinSplitData<Groth16Proof>>, sapling_shielded_data: &Option<sapling::ShieldedData<sapling::PerSpendAnchor>>, ) -> Result<AsyncChecks, TransactionError> { let tx = request.transaction(); let upgrade = request.upgrade(network); let shielded_sighash = tx.sighash(upgrade, HashType::ALL, None); Ok( Self::verify_transparent_inputs_and_outputs( &request, network, inputs, script_verifier, )? .and(Self::verify_sprout_shielded_data( joinsplit_data, &shielded_sighash, )) .and(Self::verify_sapling_shielded_data( sapling_shielded_data, &shielded_sighash, )?), ) } /// Verify a V5 transaction. /// /// Returns a set of asynchronous checks that must all succeed for the transaction to be /// considered valid. These checks include: /// /// - transaction support by the considered network upgrade (see [`Request::upgrade`]) /// - transparent transfers /// - sapling shielded data (TODO) /// - orchard shielded data (TODO) /// /// The parameters of this method are: /// /// - the `request` to verify (that contains the transaction and other metadata, see [`Request`] /// for more information) /// - the `network` to consider when verifying /// - the `script_verifier` to use for verifying the transparent transfers /// - the transparent `inputs` in the transaction /// - the sapling shielded data of the transaction, if any /// - the orchard shielded data of the transaction, if any fn verify_v5_transaction( request: Request, network: Network, script_verifier: script::Verifier<ZS>, inputs: &[transparent::Input], sapling_shielded_data: &Option<sapling::ShieldedData<sapling::SharedAnchor>>, orchard_shielded_data: &Option<orchard::ShieldedData>, ) -> Result<AsyncChecks, TransactionError> { let transaction = request.transaction(); let upgrade = request.upgrade(network); let shielded_sighash = transaction.sighash(upgrade, HashType::ALL, None); Self::verify_v5_transaction_network_upgrade(&transaction, upgrade)?; let _async_checks = Self::verify_transparent_inputs_and_outputs( &request, network, inputs, script_verifier, )? .and(Self::verify_sapling_shielded_data( sapling_shielded_data, &shielded_sighash, )?) .and(Self::verify_orchard_shielded_data( orchard_shielded_data, &shielded_sighash, )?); // TODO: // - verify orchard shielded pool (ZIP-224) (#2105) // - ZIP-216 (#1798) // - ZIP-244 (#1874) // - remaining consensus rules (#2379) // - remove `should_panic` from tests unimplemented!("V5 transaction validation is not yet complete"); } /// Verifies if a V5 `transaction` is supported by `network_upgrade`. 
fn verify_v5_transaction_network_upgrade( transaction: &Transaction, network_upgrade: NetworkUpgrade, ) -> Result<(), TransactionError> { match network_upgrade { // Supports V5 transactions NetworkUpgrade::Nu5 => Ok(()), // Does not support V5 transactions NetworkUpgrade::Genesis | NetworkUpgrade::BeforeOverwinter | NetworkUpgrade::Overwinter | NetworkUpgrade::Sapling | NetworkUpgrade::Blossom | NetworkUpgrade::Heartwood | NetworkUpgrade::Canopy => Err(TransactionError::UnsupportedByNetworkUpgrade( transaction.version(), network_upgrade, )), } } /// Verifies if a transaction's transparent `inputs` are valid using the provided /// `script_verifier`. fn verify_transparent_inputs_and_outputs( request: &Request, network: Network, inputs: &[transparent::Input], script_verifier: script::Verifier<ZS>, ) -> Result<AsyncChecks, TransactionError> { let transaction = request.transaction(); if transaction.is_coinbase() { // The script verifier only verifies PrevOut inputs and their corresponding UTXOs. // Coinbase transactions don't have any PrevOut inputs. Ok(AsyncChecks::new()) } else { // feed all of the inputs to the script and shielded verifiers // the script_verifier also checks transparent sighashes, using its own implementation let cached_ffi_transaction = Arc::new(CachedFfiTransaction::new(transaction)); let known_utxos = request.known_utxos(); let upgrade = request.upgrade(network); let script_checks = (0..inputs.len()) .into_iter() .map(move |input_index| { let request = script::Request { upgrade, known_utxos: known_utxos.clone(), cached_ffi_transaction: cached_ffi_transaction.clone(), input_index, }; script_verifier.clone().oneshot(request) }) .collect(); Ok(script_checks) } } /// Verifies a transaction's Sprout shielded join split data. fn verify_sprout_shielded_data( joinsplit_data: &Option<transaction::JoinSplitData<Groth16Proof>>, shielded_sighash: &SigHash, ) -> AsyncChecks { let mut checks = AsyncChecks::new(); if let Some(joinsplit_data) = joinsplit_data { // XXX create a method on JoinSplitData // that prepares groth16::Items with the correct proofs // and proof inputs, handling interstitial treestates // correctly. // Then, pass those items to self.joinsplit to verify them. // Consensus rule: The joinSplitSig MUST represent a // valid signature, under joinSplitPubKey, of the // sighash. // // Queue the validation of the JoinSplit signature while // adding the resulting future to our collection of // async checks that (at a minimum) must pass for the // transaction to verify. // // https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus let ed25519_verifier = primitives::ed25519::VERIFIER.clone(); let ed25519_item = (joinsplit_data.pub_key, joinsplit_data.sig, shielded_sighash).into(); checks.push(ed25519_verifier.oneshot(ed25519_item)); } checks } /// Verifies a transaction's Sapling shielded data. fn verify_sapling_shielded_data<A>( sapling_shielded_data: &Option<sapling::ShieldedData<A>>, shielded_sighash: &SigHash, ) -> Result<AsyncChecks, TransactionError> where A: sapling::AnchorVariant + Clone, sapling::Spend<sapling::PerSpendAnchor>: From<(sapling::Spend<A>, A::Shared)>, { let mut async_checks = AsyncChecks::new(); if let Some(sapling_shielded_data) = sapling_shielded_data { for spend in sapling_shielded_data.spends_per_anchor() { // Consensus rule: cv and rk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk // MUST NOT be 𝒪_J. 
// // https://zips.z.cash/protocol/protocol.pdf#spenddesc check::spend_cv_rk_not_small_order(&spend)?; // Consensus rule: The proof π_ZKSpend MUST be valid // given a primary input formed from the other // fields except spendAuthSig. // // Queue the verification of the Groth16 spend proof // for each Spend description while adding the // resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. async_checks.push( primitives::groth16::SPEND_VERIFIER .clone() .oneshot(primitives::groth16::ItemWrapper::from(&spend).into()), ); // Consensus rule: The spend authorization signature // MUST be a valid SpendAuthSig signature over // SigHash using rk as the validating key. // // Queue the validation of the RedJubjub spend // authorization signature for each Spend // description while adding the resulting future to // our collection of async checks that (at a // minimum) must pass for the transaction to verify. async_checks.push( primitives::redjubjub::VERIFIER .clone() .oneshot((spend.rk, spend.spend_auth_sig, shielded_sighash).into()), ); } for output in sapling_shielded_data.outputs() { // Consensus rule: cv and epk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]epk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#outputdesc check::output_cv_epk_not_small_order(output)?; // Consensus rule: The proof π_ZKOutput MUST be // valid given a primary input formed from the other // fields except C^enc and C^out. // // Queue the verification of the Groth16 output // proof for each Output description while adding // the resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. async_checks.push( primitives::groth16::OUTPUT_VERIFIER .clone() .oneshot(primitives::groth16::ItemWrapper::from(output).into()), ); } let bvk = sapling_shielded_data.binding_verification_key(); async_checks.push( primitives::redjubjub::VERIFIER .clone() .oneshot((bvk, sapling_shielded_data.binding_sig, &shielded_sighash).into()), ); } Ok(async_checks) } /// Verifies a transaction's Orchard shielded data. fn verify_orchard_shielded_data( orchard_shielded_data: &Option<orchard::ShieldedData>, shielded_sighash: &SigHash, ) -> Result<AsyncChecks, TransactionError> { let mut async_checks = AsyncChecks::new(); if let Some(orchard_shielded_data) = orchard_shielded_data { for authorized_action in orchard_shielded_data.actions.iter().cloned() { let (action, spend_auth_sig) = authorized_action.into_parts(); // Consensus rule: The spend authorization signature // MUST be a valid SpendAuthSig signature over // SigHash using rk as the validating key. // // Queue the validation of the RedPallas spend // authorization signature for each Action // description while adding the resulting future to // our collection of async checks that (at a // minimum) must pass for the transaction to verify. async_checks.push( primitives::redpallas::VERIFIER .clone() .oneshot((action.rk, spend_auth_sig, &shielded_sighash).into()), ); } let bvk = orchard_shielded_data.binding_verification_key(); async_checks.push( primitives::redpallas::VERIFIER .clone() .oneshot((bvk, orchard_shielded_data.binding_sig, &shielded_sighash).into()), ); } Ok(async_checks) } } /// A set of unordered asynchronous checks that should succeed. /// /// A wrapper around [`FuturesUnordered`] with some auxiliary methods. struct AsyncChecks(Fu
red<Pin<Box<dyn Future<Output = Result<(), BoxError>> + Send>>>); impl AsyncChecks { /// Create an empty set of unordered asynchronous checks. pub fn new() -> Self { AsyncChecks(FuturesUnordered::new()) } /// Push a check into the set. pub fn push(&mut self, check: impl Future<Output = Result<(), BoxError>> + Send + 'static) { self.0.push(check.boxed()); } /// Push a set of checks into the set. /// /// This method can be daisy-chained. pub fn and(mut self, checks: AsyncChecks) -> Self { self.0.extend(checks.0); self } /// Wait until all checks in the set finish. /// /// If any of the checks fail, this method immediately returns the error and cancels all other /// checks by dropping them. async fn check(mut self) -> Result<(), BoxError> { // Wait for all asynchronous checks to complete // successfully, or fail verification if they error. while let Some(check) = self.0.next().await { tracing::trace!(?check, remaining = self.0.len()); check?; } Ok(()) } } impl<F> FromIterator<F> for AsyncChecks where F: Future<Output = Result<(), BoxError>> + Send + 'static, { fn from_iter<I>(iterator: I) -> Self where I: IntoIterator<Item = F>, { AsyncChecks(iterator.into_iter().map(FutureExt::boxed).collect()) } }
turesUnorde
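For illustration, here is a minimal sketch of how the `AsyncChecks` helper above composes during verification; the placeholder futures stand in for the real script, Groth16, and RedJubjub checks, and `demo`/`ok_check` are hypothetical names, not part of the verifier:

// Each sub-verifier contributes futures; sets are daisy-chained with
// `and`, and `check` awaits them all, failing on the first error.
async fn demo() -> Result<(), BoxError> {
    // Stand-in for a real verification future (always succeeds).
    let ok_check = || futures::future::ready(Ok::<(), BoxError>(()));

    let mut transparent = AsyncChecks::new();
    transparent.push(ok_check()); // e.g. one script check per transparent input

    // The FromIterator impl collects per-spend / per-output checks.
    let shielded: AsyncChecks = (0..3).map(|_| ok_check()).collect();

    transparent.and(shielded).check().await
}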
reducer.js
/* * * LoginPage reducer * */ import { fromJS } from 'immutable'; import { SET_ERROR } from './constants'; const initialState = fromJS({ error: '', loading: false, }); function
(state = initialState, action) { switch (action.type) { case SET_ERROR: return state.set('error', action.error); default: return state; } } export default loginPageReducer;
loginPageReducer
0019_user_avatar.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-06-25 08:45 from __future__ import unicode_literals from django.db import migrations, models
dependencies = [ ('blog', '0018_auto_20170625_1616'), ] operations = [ migrations.AddField( model_name='user', name='avatar', field=models.ImageField(blank=True, default='avatar/default.png', upload_to='avatar/%Y/%m'), ), ]
class Migration(migrations.Migration):
dirty-input.ts
import { IValidateAndI18nKey } from '../validation/validate'; import { TranslatedValueOrKey } from './translation'; export interface IDirtyInput<T> {
dirty?: boolean; onMadeDirty?: () => void; valid?: boolean; onValidChange?: (isValid: boolean) => void; onChange?: (t: T) => void; validation?: Array<IValidateAndI18nKey<T>>; validMessage?: TranslatedValueOrKey<T>; helpMessage?: TranslatedValueOrKey<T>; }
rst_roles.py
from docutils.parsers.rst import roles from docutils import nodes from docutils.parsers.rst.states import Inliner import docutils.parsers.rst.roles def strike_role(role, rawtext, text, lineno, inliner: Inliner, options={}, content=[]): """ USAGE: :del:`your context` :param role: del :param rawtext: :del:`your context` :param text: your context :param lineno: :param inliner: :param options: :param content: :return: """ # roles.set_classes(options) # options.setdefault('classes', []).append("mys") node = nodes.inline(rawtext, text, **dict(classes=['strike'])) return [node], [] def setup(app):
roles.register_canonical_role('del', strike_role)
thread_pool.rs
use crate::enter; use crate::unpark_mutex::UnparkMutex; use futures_core::future::{Future, FutureObj}; use futures_core::task::{Poll, Wake, Spawn, SpawnError}; use futures_util::future::FutureExt; use futures_util::task::local_waker_ref_from_nonlocal; use num_cpus; use std::io; use std::prelude::v1::*; use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc; use std::thread; use std::fmt; /// A general-purpose thread pool for scheduling tasks that poll futures to /// completion. /// /// The thread pool multiplexes any number of tasks onto a fixed number of /// worker threads. /// /// This type is a clonable handle to the threadpool itself. /// Cloning it will only create a new reference, not a new threadpool. pub struct ThreadPool { state: Arc<PoolState>, } /// Thread pool configuration object. pub struct ThreadPoolBuilder { pool_size: usize, stack_size: usize, name_prefix: Option<String>, after_start: Option<Arc<dyn Fn(usize) + Send + Sync>>, before_stop: Option<Arc<dyn Fn(usize) + Send + Sync>>, } trait AssertSendSync: Send + Sync {} impl AssertSendSync for ThreadPool {} struct PoolState { tx: Mutex<mpsc::Sender<Message>>, rx: Mutex<mpsc::Receiver<Message>>, cnt: AtomicUsize, size: usize, } impl fmt::Debug for ThreadPool { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("ThreadPool") .field("size", &self.state.size) .finish() } } impl fmt::Debug for ThreadPoolBuilder { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("ThreadPoolBuilder") .field("pool_size", &self.pool_size) .field("name_prefix", &self.name_prefix) .finish() } } enum Message { Run(Task), Close, } impl ThreadPool { /// Creates a new thread pool with the default configuration. /// /// See documentation for the methods in /// [`ThreadPoolBuilder`](ThreadPoolBuilder) for details on the default /// configuration. #[allow(clippy::new_ret_no_self)] pub fn new() -> Result<ThreadPool, io::Error> { ThreadPoolBuilder::new().create() } /// Create a default thread pool configuration, which can then be customized. /// /// See documentation for the methods in /// [`ThreadPoolBuilder`](ThreadPoolBuilder) for details on the default /// configuration. pub fn builder() -> ThreadPoolBuilder { ThreadPoolBuilder::new() } /// Runs the given future with this thread pool as the default spawner for /// spawning tasks. /// /// **This function will block the calling thread** until the given future /// is complete. While executing that future, any tasks spawned onto the /// default spawner will be routed to this thread pool. /// /// Note that the function will return when the provided future completes, /// even if some of the tasks it spawned are still running. 
pub fn run<F: Future>(&mut self, f: F) -> F::Output { crate::LocalPool::new().run_until(f) } } impl Spawn for ThreadPool { fn spawn_obj( &mut self, future: FutureObj<'static, ()>, ) -> Result<(), SpawnError> { (&*self).spawn_obj(future) } } impl Spawn for &ThreadPool { fn spawn_obj( &mut self, future: FutureObj<'static, ()>, ) -> Result<(), SpawnError> { let task = Task { future, wake_handle: Arc::new(WakeHandle { exec: self.clone(), mutex: UnparkMutex::new(), }), exec: self.clone(), }; self.state.send(Message::Run(task)); Ok(()) } } impl PoolState { fn send(&self, msg: Message) { self.tx.lock().unwrap().send(msg).unwrap(); } fn work(&self, idx: usize, after_start: Option<Arc<dyn Fn(usize) + Send + Sync>>, before_stop: Option<Arc<dyn Fn(usize) + Send + Sync>>) { let _scope = enter().unwrap(); if let Some(after_start) = after_start { after_start(idx); } loop { let msg = self.rx.lock().unwrap().recv().unwrap(); match msg { Message::Run(task) => task.run(), Message::Close => break, } } if let Some(before_stop) = before_stop { before_stop(idx); } } } impl Clone for ThreadPool { fn clone(&self) -> ThreadPool { self.state.cnt.fetch_add(1, Ordering::Relaxed); ThreadPool { state: self.state.clone() } } } impl Drop for ThreadPool { fn drop(&mut self) { if self.state.cnt.fetch_sub(1, Ordering::Relaxed) == 1 { for _ in 0..self.state.size { self.state.send(Message::Close); } } } } impl ThreadPoolBuilder { /// Create a default thread pool configuration. /// /// See the other methods on this type for details on the defaults. pub fn new() -> ThreadPoolBuilder { ThreadPoolBuilder { pool_size: num_cpus::get(), stack_size: 0, name_prefix: None, after_start: None, before_stop: None, } } /// Set the size of a future ThreadPool. /// /// The size of a thread pool is the number of worker threads spawned. By /// default, this is equal to the number of CPU cores. pub fn pool_size(&mut self, size: usize) -> &mut Self { self.pool_size = size; self } /// Set stack size of threads in the pool. /// /// By default, worker threads use Rust's standard stack size. pub fn stack_size(&mut self, stack_size: usize) -> &mut Self { self.stack_size = stack_size; self } /// Set thread name prefix of a future ThreadPool. /// /// Thread name prefix is used for generating thread names. For example, if prefix is /// `my-pool-`, then threads in the pool will get names like `my-pool-1` etc. /// /// By default, worker threads are assigned Rust's standard thread name. pub fn name_prefix<S: Into<String>>(&mut self, name_prefix: S) -> &mut Self { self.name_prefix = Some(name_prefix.into()); self } /// Execute the closure `f` immediately after each worker thread is started, /// but before running any tasks on it. /// /// This hook is intended for bookkeeping and monitoring. /// The closure `f` will be dropped after the `builder` is dropped /// and all worker threads in the pool have executed it. /// /// The closure provided will receive an index corresponding to the worker /// thread it's running on. pub fn after_start<F>(&mut self, f: F) -> &mut Self where F: Fn(usize) + Send + Sync + 'static { self.after_start = Some(Arc::new(f)); self } /// Execute closure `f` just prior to shutting down each worker thread. /// /// This hook is intended for bookkeeping and monitoring. /// The closure `f` will be dropped after the `builder` is dropped /// and all threads in the pool have executed it. /// /// The closure provided will receive an index corresponding to the worker /// thread it's running on.
pub fn before_stop<F>(&mut self, f: F) -> &mut Self where F: Fn(usize) + Send + Sync + 'static { self.before_stop = Some(Arc::new(f)); self } /// Create a [`ThreadPool`](ThreadPool) with the given configuration. /// /// # Panics /// /// Panics if `pool_size == 0`. pub fn create(&mut self) -> Result<ThreadPool, io::Error> { let (tx, rx) = mpsc::channel(); let pool = ThreadPool { state: Arc::new(PoolState { tx: Mutex::new(tx), rx: Mutex::new(rx), cnt: AtomicUsize::new(1), size: self.pool_size, }), }; assert!(self.pool_size > 0); for counter in 0..self.pool_size { let state = pool.state.clone(); let after_start = self.after_start.clone(); let before_stop = self.before_stop.clone(); let mut thread_builder = thread::Builder::new(); if let Some(ref name_prefix) = self.name_prefix { thread_builder = thread_builder.name(format!("{}{}", name_prefix, counter)); } if self.stack_size > 0 { thread_builder = thread_builder.stack_size(self.stack_size); } thread_builder.spawn(move || state.work(counter, after_start, before_stop))?; } Ok(pool) } } impl Default for ThreadPoolBuilder { fn default() -> Self
} /// A task responsible for polling a future to completion. struct Task { future: FutureObj<'static, ()>, exec: ThreadPool, wake_handle: Arc<WakeHandle>, } struct WakeHandle { mutex: UnparkMutex<Task>, exec: ThreadPool, } impl Task { /// Actually run the task (invoking `poll` on the future) on the current /// thread. pub fn run(self) { let Task { mut future, wake_handle, mut exec } = self; let local_waker = local_waker_ref_from_nonlocal(&wake_handle); // Safety: The ownership of this `Task` object is evidence that // we are in the `POLLING`/`REPOLL` state for the mutex. unsafe { wake_handle.mutex.start_poll(); loop { let res = future.poll_unpin(&local_waker); match res { Poll::Pending => {} Poll::Ready(()) => return wake_handle.mutex.complete(), } let task = Task { future, wake_handle: wake_handle.clone(), exec, }; match wake_handle.mutex.wait(task) { Ok(()) => return, // we've waited Err(task) => { // someone's notified us future = task.future; exec = task.exec; } } } } } } impl fmt::Debug for Task { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Task") .field("contents", &"...") .finish() } } impl Wake for WakeHandle { fn wake(arc_self: &Arc<Self>) { match arc_self.mutex.notify() { Ok(task) => arc_self.exec.state.send(Message::Run(task)), Err(()) => {} } } } #[cfg(test)] mod tests { use super::*; use std::sync::mpsc; #[test] fn test_drop_after_start() { let (tx, rx) = mpsc::sync_channel(2); let _cpu_pool = ThreadPoolBuilder::new() .pool_size(2) .after_start(move |_| tx.send(1).unwrap()).create().unwrap(); // After the ThreadPoolBuilder is dropped, the tx should be dropped // so that we can use rx as an iterator. let count = rx.into_iter().count(); assert_eq!(count, 2); } }
{ Self::new() }
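As a quick orientation, a usage sketch for the builder API above; `demo` is a hypothetical caller, error handling is minimal, and it assumes `futures_util::future::ready` (this crate already depends on `futures_util`):

fn demo() -> Result<(), std::io::Error> {
    let mut pool = ThreadPoolBuilder::new()
        .pool_size(4)                         // four worker threads
        .name_prefix("demo-pool-")            // threads named demo-pool-0 .. demo-pool-3
        .after_start(|idx| println!("worker {} started", idx))
        .create()?;

    // `run` blocks the calling thread until the given future completes.
    let answer = pool.run(futures_util::future::ready(40 + 2));
    assert_eq!(answer, 42);
    Ok(())
}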
likely_subtags.rs
use unic_langid::LanguageIdentifier; static REGION_MATCHING_KEYS: &[&str] = &[ "az", "bg", "cs", "de", "es", "fi", "fr", "hu", "it", "lt", "lv", "nl", "pl", "ro", "ru", ]; pub trait MockLikelySubtags { fn add_likely_subtags(&mut self) -> bool; } impl MockLikelySubtags for LanguageIdentifier { fn add_likely_subtags(&mut self) -> bool { let extended = match self.to_string().as_str() { "en" => "en-Latn-US", "fr" => "fr-Latn-FR", "sr" => "sr-Cyrl-SR", "sr-RU" => "sr-Latn-SR", "az-IR" => "az-Arab-IR", "zh-GB" => "zh-Hant-GB", "zh-US" => "zh-Hant-US", _ => { let lang = self.get_language(); for subtag in REGION_MATCHING_KEYS { if lang == *subtag { self.set_region(subtag).unwrap(); return true; } } return false; }
if let Some(subtag) = langid.get_script() { self.set_script(subtag).unwrap(); } else { self.clear_script(); } if let Some(subtag) = langid.get_region() { self.set_region(subtag).unwrap(); } else { self.clear_region(); } true } }
}; let langid: LanguageIdentifier = extended.parse().expect("Failed to parse langid."); self.set_language(langid.get_language()).unwrap();
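A short sketch of the mock in action; `demo` is a hypothetical test helper, and the `de-DE` expectation assumes `unic_langid` canonicalizes region subtags to upper case:

fn demo() {
    // An explicitly mapped language gets full script + region subtags.
    let mut langid: LanguageIdentifier = "en".parse().expect("valid langid");
    assert!(langid.add_likely_subtags());
    assert_eq!(langid.to_string(), "en-Latn-US");

    // A language listed only in REGION_MATCHING_KEYS gets its own code as region.
    let mut langid: LanguageIdentifier = "de".parse().expect("valid langid");
    assert!(langid.add_likely_subtags());
    assert_eq!(langid.to_string(), "de-DE");
}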
cp_regressor.rs
use super::utils::ref_to_slice; use crate::nonparametric::GaussianProcessRegressor; use crate::{DistributionError, ExactMultivariateStudentTParams, StudentTParams}; use crate::{MultivariateStudentTParams, RandomVariable}; use opensrdk_kernel_method::PositiveDefiniteKernel; use opensrdk_linear_algebra::pp::trf::PPTRF; use opensrdk_linear_algebra::{SymmetricPackedMatrix, Vector}; pub trait CauchyProcessRegressor<K, T>: GaussianProcessRegressor<K, T> where K: PositiveDefiniteKernel<T>, T: RandomVariable, { fn cp_predict(&self, xs: &T) -> Result<StudentTParams, DistributionError> { let fs = self.cp_predict_multivariate(ref_to_slice(xs))?; Ok(StudentTParams::new( fs.nu(), fs.mu()[0], fs.lsigma().0.elems()[0], )?) } fn cp_predict_multivariate( &self, xs: &[T], ) -> Result<ExactMultivariateStudentTParams, DistributionError>; } impl<K, T, GPR> CauchyProcessRegressor<K, T> for GPR where K: PositiveDefiniteKernel<T>, T: RandomVariable, GPR: GaussianProcessRegressor<K, T>, { fn
( &self, xs: &[T], ) -> Result<ExactMultivariateStudentTParams, DistributionError> { let n = self.mu().len(); let mahalanobis_squared = self.mahalanobis_squared(); let (mu, lsigma) = self.gp_predict_multivariate(xs)?.eject(); let coefficient = ((1.0 + mahalanobis_squared) / (1 + n) as f64).sqrt(); let new_lsigma = PPTRF( SymmetricPackedMatrix::from(mu.len(), (coefficient * lsigma.0.eject().col_mat()).vec()) .unwrap(), ); ExactMultivariateStudentTParams::new((1 + n) as f64, mu, new_lsigma) } }
cp_predict_multivariate
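In symbols — a transcription of `cp_predict_multivariate` above, not a derivation — with $n$ the length of `self.mu()` and $\beta^2$ the value of `self.mahalanobis_squared()`, the GP predictive $(\mu_{\mathrm{GP}}, L_{\mathrm{GP}})$ is mapped to Student-t parameters as

\nu^{*} = n + 1, \qquad \mu^{*} = \mu_{\mathrm{GP}}, \qquad L^{*} = \sqrt{\frac{1 + \beta^{2}}{n + 1}}\, L_{\mathrm{GP}}

i.e. the predictive covariance is the GP covariance scaled by $(1 + \beta^{2})/(n + 1)$, with $n + 1$ degrees of freedom.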
io.go
package iio import ( "bytes" "io/ioutil" "os" "go.uber.org/zap" ) func ReadEntireFile(logger *zap.Logger, file string) []byte { b, err := ioutil.ReadFile(file) if err != nil { logger.Warn("ReadEntireFile failed", zap.Error(err)) return nil } return b } func SplitLines(b []byte) [][]byte { return bytes.Split(b, []byte{'\n'}) } func SkipWhitespace(data []byte) []byte { for idx, ch := range data { if ch != ' ' { return data[idx:] } } return nil } func NextChunk(data []byte) ([]byte, []byte)
func ReadEntries(logger *zap.Logger, path string) []os.FileInfo { dir, err := os.Open(path) if err != nil { logger.Warn("failed to open directory for reading", zap.String("path", path), zap.Error(err)) return nil } entries, err := dir.Readdir(-1) _ = dir.Close() if err != nil { logger.Warn("failed to read directories", zap.String("path", path), zap.Error(err)) return nil } return entries }
{ data = SkipWhitespace(data) if len(data) == 0 { return nil, nil } for idx, ch := range data { if ch == ' ' { return data[:idx], SkipWhitespace(data[idx:]) } } return data, nil // no whitespace, return the whole chunk }
ui.rs
// This file is generated. Do not edit // @generated // https://github.com/Manishearth/rust-clippy/issues/702 #![allow(unknown_lints)] #![allow(clippy)] #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(box_pointers)] #![allow(dead_code)] #![allow(missing_docs)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(non_upper_case_globals)] #![allow(trivial_casts)] #![allow(unsafe_code)] #![allow(unused_imports)] #![allow(unused_results)] use protobuf::Message as Message_imported_for_functions; use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions; #[derive(PartialEq,Clone,Default)] pub struct ObservationUI { // message fields groups: ::protobuf::RepeatedField<ControlGroup>, // message oneof groups panel: ::std::option::Option<ObservationUI_oneof_panel>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ObservationUI {} #[derive(Clone,PartialEq)] pub enum ObservationUI_oneof_panel { single(SinglePanel), multi(MultiPanel), cargo(CargoPanel), production(ProductionPanel), } impl ObservationUI { pub fn new() -> ObservationUI { ::std::default::Default::default() } pub fn default_instance() -> &'static ObservationUI { static mut instance: ::protobuf::lazy::Lazy<ObservationUI> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ObservationUI, }; unsafe { instance.get(ObservationUI::new) } } // repeated .SC2APIProtocol.ControlGroup groups = 1; pub fn clear_groups(&mut self) { self.groups.clear(); } // Param is passed by value, moved pub fn set_groups(&mut self, v: ::protobuf::RepeatedField<ControlGroup>) { self.groups = v; } // Mutable pointer to the field. pub fn mut_groups(&mut self) -> &mut ::protobuf::RepeatedField<ControlGroup> { &mut self.groups } // Take field pub fn take_groups(&mut self) -> ::protobuf::RepeatedField<ControlGroup> { ::std::mem::replace(&mut self.groups, ::protobuf::RepeatedField::new()) } pub fn get_groups(&self) -> &[ControlGroup] { &self.groups } fn get_groups_for_reflect(&self) -> &::protobuf::RepeatedField<ControlGroup> { &self.groups } fn mut_groups_for_reflect(&mut self) -> &mut ::protobuf::RepeatedField<ControlGroup> { &mut self.groups } // optional .SC2APIProtocol.SinglePanel single = 2; pub fn clear_single(&mut self) { self.panel = ::std::option::Option::None; } pub fn has_single(&self) -> bool { match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::single(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_single(&mut self, v: SinglePanel) { self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::single(v)) } // Mutable pointer to the field. 
pub fn mut_single(&mut self) -> &mut SinglePanel { if let ::std::option::Option::Some(ObservationUI_oneof_panel::single(_)) = self.panel { } else { self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::single(SinglePanel::new())); } match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::single(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_single(&mut self) -> SinglePanel { if self.has_single() { match self.panel.take() { ::std::option::Option::Some(ObservationUI_oneof_panel::single(v)) => v, _ => panic!(), } } else { SinglePanel::new() } } pub fn get_single(&self) -> &SinglePanel { match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::single(ref v)) => v, _ => SinglePanel::default_instance(), } } // optional .SC2APIProtocol.MultiPanel multi = 3; pub fn clear_multi(&mut self) { self.panel = ::std::option::Option::None; } pub fn has_multi(&self) -> bool { match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::multi(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_multi(&mut self, v: MultiPanel) { self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::multi(v)) } // Mutable pointer to the field. pub fn mut_multi(&mut self) -> &mut MultiPanel
// Take field pub fn take_multi(&mut self) -> MultiPanel { if self.has_multi() { match self.panel.take() { ::std::option::Option::Some(ObservationUI_oneof_panel::multi(v)) => v, _ => panic!(), } } else { MultiPanel::new() } } pub fn get_multi(&self) -> &MultiPanel { match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::multi(ref v)) => v, _ => MultiPanel::default_instance(), } } // optional .SC2APIProtocol.CargoPanel cargo = 4; pub fn clear_cargo(&mut self) { self.panel = ::std::option::Option::None; } pub fn has_cargo(&self) -> bool { match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::cargo(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_cargo(&mut self, v: CargoPanel) { self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::cargo(v)) } // Mutable pointer to the field. pub fn mut_cargo(&mut self) -> &mut CargoPanel { if let ::std::option::Option::Some(ObservationUI_oneof_panel::cargo(_)) = self.panel { } else { self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::cargo(CargoPanel::new())); } match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::cargo(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_cargo(&mut self) -> CargoPanel { if self.has_cargo() { match self.panel.take() { ::std::option::Option::Some(ObservationUI_oneof_panel::cargo(v)) => v, _ => panic!(), } } else { CargoPanel::new() } } pub fn get_cargo(&self) -> &CargoPanel { match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::cargo(ref v)) => v, _ => CargoPanel::default_instance(), } } // optional .SC2APIProtocol.ProductionPanel production = 5; pub fn clear_production(&mut self) { self.panel = ::std::option::Option::None; } pub fn has_production(&self) -> bool { match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::production(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_production(&mut self, v: ProductionPanel) { self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::production(v)) } // Mutable pointer to the field. 
pub fn mut_production(&mut self) -> &mut ProductionPanel { if let ::std::option::Option::Some(ObservationUI_oneof_panel::production(_)) = self.panel { } else { self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::production(ProductionPanel::new())); } match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::production(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_production(&mut self) -> ProductionPanel { if self.has_production() { match self.panel.take() { ::std::option::Option::Some(ObservationUI_oneof_panel::production(v)) => v, _ => panic!(), } } else { ProductionPanel::new() } } pub fn get_production(&self) -> &ProductionPanel { match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::production(ref v)) => v, _ => ProductionPanel::default_instance(), } } } impl ::protobuf::Message for ObservationUI { fn is_initialized(&self) -> bool { for v in &self.groups { if !v.is_initialized() { return false; } }; if let Some(ObservationUI_oneof_panel::single(ref v)) = self.panel { if !v.is_initialized() { return false; } } if let Some(ObservationUI_oneof_panel::multi(ref v)) = self.panel { if !v.is_initialized() { return false; } } if let Some(ObservationUI_oneof_panel::cargo(ref v)) = self.panel { if !v.is_initialized() { return false; } } if let Some(ObservationUI_oneof_panel::production(ref v)) = self.panel { if !v.is_initialized() { return false; } } true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.groups)?; }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::single(is.read_message()?)); }, 3 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::multi(is.read_message()?)); }, 4 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::cargo(is.read_message()?)); }, 5 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::production(is.read_message()?)); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; for value in &self.groups { let len = value.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }; if let ::std::option::Option::Some(ref v) = self.panel { match v { &ObservationUI_oneof_panel::single(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ObservationUI_oneof_panel::multi(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, 
&ObservationUI_oneof_panel::cargo(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ObservationUI_oneof_panel::production(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, }; } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { for v in &self.groups { os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }; if let ::std::option::Option::Some(ref v) = self.panel { match v { &ObservationUI_oneof_panel::single(ref v) => { os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ObservationUI_oneof_panel::multi(ref v) => { os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ObservationUI_oneof_panel::cargo(ref v) => { os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ObservationUI_oneof_panel::production(ref v) => { os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, }; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ObservationUI { fn new() -> ObservationUI { ObservationUI::new() } fn descriptor_static(_: ::std::option::Option<ObservationUI>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<ControlGroup>>( "groups", ObservationUI::get_groups_for_reflect, ObservationUI::mut_groups_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, SinglePanel>( "single", ObservationUI::has_single, ObservationUI::get_single, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, MultiPanel>( "multi", ObservationUI::has_multi, ObservationUI::get_multi, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, CargoPanel>( "cargo", ObservationUI::has_cargo, ObservationUI::get_cargo, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ProductionPanel>( 
"production", ObservationUI::has_production, ObservationUI::get_production, )); ::protobuf::reflect::MessageDescriptor::new::<ObservationUI>( "ObservationUI", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ObservationUI { fn clear(&mut self) { self.clear_groups(); self.clear_single(); self.clear_multi(); self.clear_cargo(); self.clear_production(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ObservationUI { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ObservationUI { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ControlGroup { // message fields control_group_index: ::std::option::Option<u32>, leader_unit_type: ::std::option::Option<u32>, count: ::std::option::Option<u32>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ControlGroup {} impl ControlGroup { pub fn new() -> ControlGroup { ::std::default::Default::default() } pub fn default_instance() -> &'static ControlGroup { static mut instance: ::protobuf::lazy::Lazy<ControlGroup> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ControlGroup, }; unsafe { instance.get(ControlGroup::new) } } // optional uint32 control_group_index = 1; pub fn clear_control_group_index(&mut self) { self.control_group_index = ::std::option::Option::None; } pub fn has_control_group_index(&self) -> bool { self.control_group_index.is_some() } // Param is passed by value, moved pub fn set_control_group_index(&mut self, v: u32) { self.control_group_index = ::std::option::Option::Some(v); } pub fn get_control_group_index(&self) -> u32 { self.control_group_index.unwrap_or(0) } fn get_control_group_index_for_reflect(&self) -> &::std::option::Option<u32> { &self.control_group_index } fn mut_control_group_index_for_reflect(&mut self) -> &mut ::std::option::Option<u32> { &mut self.control_group_index } // optional uint32 leader_unit_type = 2; pub fn clear_leader_unit_type(&mut self) { self.leader_unit_type = ::std::option::Option::None; } pub fn has_leader_unit_type(&self) -> bool { self.leader_unit_type.is_some() } // Param is passed by value, moved pub fn set_leader_unit_type(&mut self, v: u32) { self.leader_unit_type = ::std::option::Option::Some(v); } pub fn get_leader_unit_type(&self) -> u32 { self.leader_unit_type.unwrap_or(0) } fn get_leader_unit_type_for_reflect(&self) -> &::std::option::Option<u32> { &self.leader_unit_type } fn mut_leader_unit_type_for_reflect(&mut self) -> &mut ::std::option::Option<u32> { &mut self.leader_unit_type } // optional uint32 count = 3; pub fn clear_count(&mut self) { self.count = ::std::option::Option::None; } pub fn has_count(&self) -> bool { self.count.is_some() } // Param is passed by value, moved pub fn set_count(&mut self, v: u32) { self.count = ::std::option::Option::Some(v); } pub fn get_count(&self) -> u32 { self.count.unwrap_or(0) } fn get_count_for_reflect(&self) -> &::std::option::Option<u32> { &self.count } fn mut_count_for_reflect(&mut self) -> &mut ::std::option::Option<u32> { &mut self.count } } impl ::protobuf::Message for ControlGroup { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { 
while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_uint32()?; self.control_group_index = ::std::option::Option::Some(tmp); }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_uint32()?; self.leader_unit_type = ::std::option::Option::Some(tmp); }, 3 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_uint32()?; self.count = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.control_group_index { my_size += ::protobuf::rt::value_size(1, v, ::protobuf::wire_format::WireTypeVarint); } if let Some(v) = self.leader_unit_type { my_size += ::protobuf::rt::value_size(2, v, ::protobuf::wire_format::WireTypeVarint); } if let Some(v) = self.count { my_size += ::protobuf::rt::value_size(3, v, ::protobuf::wire_format::WireTypeVarint); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.control_group_index { os.write_uint32(1, v)?; } if let Some(v) = self.leader_unit_type { os.write_uint32(2, v)?; } if let Some(v) = self.count { os.write_uint32(3, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ControlGroup { fn new() -> ControlGroup { ControlGroup::new() } fn descriptor_static(_: ::std::option::Option<ControlGroup>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeUint32>( "control_group_index", ControlGroup::get_control_group_index_for_reflect, ControlGroup::mut_control_group_index_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeUint32>( "leader_unit_type", ControlGroup::get_leader_unit_type_for_reflect, ControlGroup::mut_leader_unit_type_for_reflect, )); 
fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeUint32>( "count", ControlGroup::get_count_for_reflect, ControlGroup::mut_count_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ControlGroup>( "ControlGroup", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ControlGroup { fn clear(&mut self) { self.clear_control_group_index(); self.clear_leader_unit_type(); self.clear_count(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ControlGroup { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ControlGroup { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct UnitInfo { // message fields unit_type: ::std::option::Option<u32>, player_relative: ::std::option::Option<u32>, health: ::std::option::Option<i32>, shields: ::std::option::Option<i32>, energy: ::std::option::Option<i32>, transport_slots_taken: ::std::option::Option<i32>, build_progress: ::std::option::Option<f32>, add_on: ::protobuf::SingularPtrField<UnitInfo>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for UnitInfo {} impl UnitInfo { pub fn new() -> UnitInfo { ::std::default::Default::default() } pub fn default_instance() -> &'static UnitInfo { static mut instance: ::protobuf::lazy::Lazy<UnitInfo> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const UnitInfo, }; unsafe { instance.get(UnitInfo::new) } } // optional uint32 unit_type = 1; pub fn clear_unit_type(&mut self) { self.unit_type = ::std::option::Option::None; } pub fn has_unit_type(&self) -> bool { self.unit_type.is_some() } // Param is passed by value, moved pub fn set_unit_type(&mut self, v: u32) { self.unit_type = ::std::option::Option::Some(v); } pub fn get_unit_type(&self) -> u32 { self.unit_type.unwrap_or(0) } fn get_unit_type_for_reflect(&self) -> &::std::option::Option<u32> { &self.unit_type } fn mut_unit_type_for_reflect(&mut self) -> &mut ::std::option::Option<u32> { &mut self.unit_type } // optional uint32 player_relative = 2; pub fn clear_player_relative(&mut self) { self.player_relative = ::std::option::Option::None; } pub fn has_player_relative(&self) -> bool { self.player_relative.is_some() } // Param is passed by value, moved pub fn set_player_relative(&mut self, v: u32) { self.player_relative = ::std::option::Option::Some(v); } pub fn get_player_relative(&self) -> u32 { self.player_relative.unwrap_or(0) } fn get_player_relative_for_reflect(&self) -> &::std::option::Option<u32> { &self.player_relative } fn mut_player_relative_for_reflect(&mut self) -> &mut ::std::option::Option<u32> { &mut self.player_relative } // optional int32 health = 3; pub fn clear_health(&mut self) { self.health = ::std::option::Option::None; } pub fn has_health(&self) -> bool { self.health.is_some() } // Param is passed by value, moved pub fn set_health(&mut self, v: i32) { self.health = ::std::option::Option::Some(v); } pub fn get_health(&self) -> i32 { self.health.unwrap_or(0) } fn get_health_for_reflect(&self) -> &::std::option::Option<i32> { &self.health } fn mut_health_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.health } // optional int32 shields = 4; pub fn clear_shields(&mut 
self) { self.shields = ::std::option::Option::None; } pub fn has_shields(&self) -> bool { self.shields.is_some() } // Param is passed by value, moved pub fn set_shields(&mut self, v: i32) { self.shields = ::std::option::Option::Some(v); } pub fn get_shields(&self) -> i32 { self.shields.unwrap_or(0) } fn get_shields_for_reflect(&self) -> &::std::option::Option<i32> { &self.shields } fn mut_shields_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.shields } // optional int32 energy = 5; pub fn clear_energy(&mut self) { self.energy = ::std::option::Option::None; } pub fn has_energy(&self) -> bool { self.energy.is_some() } // Param is passed by value, moved pub fn set_energy(&mut self, v: i32) { self.energy = ::std::option::Option::Some(v); } pub fn get_energy(&self) -> i32 { self.energy.unwrap_or(0) } fn get_energy_for_reflect(&self) -> &::std::option::Option<i32> { &self.energy } fn mut_energy_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.energy } // optional int32 transport_slots_taken = 6; pub fn clear_transport_slots_taken(&mut self) { self.transport_slots_taken = ::std::option::Option::None; } pub fn has_transport_slots_taken(&self) -> bool { self.transport_slots_taken.is_some() } // Param is passed by value, moved pub fn set_transport_slots_taken(&mut self, v: i32) { self.transport_slots_taken = ::std::option::Option::Some(v); } pub fn get_transport_slots_taken(&self) -> i32 { self.transport_slots_taken.unwrap_or(0) } fn get_transport_slots_taken_for_reflect(&self) -> &::std::option::Option<i32> { &self.transport_slots_taken } fn mut_transport_slots_taken_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.transport_slots_taken } // optional float build_progress = 7; pub fn clear_build_progress(&mut self) { self.build_progress = ::std::option::Option::None; } pub fn has_build_progress(&self) -> bool { self.build_progress.is_some() } // Param is passed by value, moved pub fn set_build_progress(&mut self, v: f32) { self.build_progress = ::std::option::Option::Some(v); } pub fn get_build_progress(&self) -> f32 { self.build_progress.unwrap_or(0.) } fn get_build_progress_for_reflect(&self) -> &::std::option::Option<f32> { &self.build_progress } fn mut_build_progress_for_reflect(&mut self) -> &mut ::std::option::Option<f32> { &mut self.build_progress } // optional .SC2APIProtocol.UnitInfo add_on = 8; pub fn clear_add_on(&mut self) { self.add_on.clear(); } pub fn has_add_on(&self) -> bool { self.add_on.is_some() } // Param is passed by value, moved pub fn set_add_on(&mut self, v: UnitInfo) { self.add_on = ::protobuf::SingularPtrField::some(v); } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. 
pub fn mut_add_on(&mut self) -> &mut UnitInfo { if self.add_on.is_none() { self.add_on.set_default(); } self.add_on.as_mut().unwrap() } // Take field pub fn take_add_on(&mut self) -> UnitInfo { self.add_on.take().unwrap_or_else(|| UnitInfo::new()) } pub fn get_add_on(&self) -> &UnitInfo { self.add_on.as_ref().unwrap_or_else(|| UnitInfo::default_instance()) } fn get_add_on_for_reflect(&self) -> &::protobuf::SingularPtrField<UnitInfo> { &self.add_on } fn mut_add_on_for_reflect(&mut self) -> &mut ::protobuf::SingularPtrField<UnitInfo> { &mut self.add_on } } impl ::protobuf::Message for UnitInfo { fn is_initialized(&self) -> bool { for v in &self.add_on { if !v.is_initialized() { return false; } }; true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_uint32()?; self.unit_type = ::std::option::Option::Some(tmp); }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_uint32()?; self.player_relative = ::std::option::Option::Some(tmp); }, 3 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.health = ::std::option::Option::Some(tmp); }, 4 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.shields = ::std::option::Option::Some(tmp); }, 5 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.energy = ::std::option::Option::Some(tmp); }, 6 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.transport_slots_taken = ::std::option::Option::Some(tmp); }, 7 => { if wire_type != ::protobuf::wire_format::WireTypeFixed32 { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_float()?; self.build_progress = ::std::option::Option::Some(tmp); }, 8 => { ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.add_on)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.unit_type { my_size += ::protobuf::rt::value_size(1, v, ::protobuf::wire_format::WireTypeVarint); } if let Some(v) = self.player_relative { my_size += ::protobuf::rt::value_size(2, v, ::protobuf::wire_format::WireTypeVarint); } if let Some(v) = self.health { my_size += ::protobuf::rt::value_size(3, v, ::protobuf::wire_format::WireTypeVarint); } if let Some(v) = self.shields { my_size += ::protobuf::rt::value_size(4, v, ::protobuf::wire_format::WireTypeVarint); } if let Some(v) = self.energy { my_size += ::protobuf::rt::value_size(5, v, ::protobuf::wire_format::WireTypeVarint); } if let Some(v) = 
self.transport_slots_taken { my_size += ::protobuf::rt::value_size(6, v, ::protobuf::wire_format::WireTypeVarint); } if let Some(v) = self.build_progress { my_size += 5; } if let Some(ref v) = self.add_on.as_ref() { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.unit_type { os.write_uint32(1, v)?; } if let Some(v) = self.player_relative { os.write_uint32(2, v)?; } if let Some(v) = self.health { os.write_int32(3, v)?; } if let Some(v) = self.shields { os.write_int32(4, v)?; } if let Some(v) = self.energy { os.write_int32(5, v)?; } if let Some(v) = self.transport_slots_taken { os.write_int32(6, v)?; } if let Some(v) = self.build_progress { os.write_float(7, v)?; } if let Some(ref v) = self.add_on.as_ref() { os.write_tag(8, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for UnitInfo { fn new() -> UnitInfo { UnitInfo::new() } fn descriptor_static(_: ::std::option::Option<UnitInfo>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeUint32>( "unit_type", UnitInfo::get_unit_type_for_reflect, UnitInfo::mut_unit_type_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeUint32>( "player_relative", UnitInfo::get_player_relative_for_reflect, UnitInfo::mut_player_relative_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "health", UnitInfo::get_health_for_reflect, UnitInfo::mut_health_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "shields", UnitInfo::get_shields_for_reflect, UnitInfo::mut_shields_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "energy", UnitInfo::get_energy_for_reflect, UnitInfo::mut_energy_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "transport_slots_taken", UnitInfo::get_transport_slots_taken_for_reflect, UnitInfo::mut_transport_slots_taken_for_reflect, )); 
fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeFloat>( "build_progress", UnitInfo::get_build_progress_for_reflect, UnitInfo::mut_build_progress_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<UnitInfo>>( "add_on", UnitInfo::get_add_on_for_reflect, UnitInfo::mut_add_on_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<UnitInfo>( "UnitInfo", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for UnitInfo { fn clear(&mut self) { self.clear_unit_type(); self.clear_player_relative(); self.clear_health(); self.clear_shields(); self.clear_energy(); self.clear_transport_slots_taken(); self.clear_build_progress(); self.clear_add_on(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for UnitInfo { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for UnitInfo { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct SinglePanel { // message fields unit: ::protobuf::SingularPtrField<UnitInfo>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for SinglePanel {} impl SinglePanel { pub fn new() -> SinglePanel { ::std::default::Default::default() } pub fn default_instance() -> &'static SinglePanel { static mut instance: ::protobuf::lazy::Lazy<SinglePanel> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const SinglePanel, }; unsafe { instance.get(SinglePanel::new) } } // optional .SC2APIProtocol.UnitInfo unit = 1; pub fn clear_unit(&mut self) { self.unit.clear(); } pub fn has_unit(&self) -> bool { self.unit.is_some() } // Param is passed by value, moved pub fn set_unit(&mut self, v: UnitInfo) { self.unit = ::protobuf::SingularPtrField::some(v); } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. pub fn mut_unit(&mut self) -> &mut UnitInfo { if self.unit.is_none() { self.unit.set_default(); } self.unit.as_mut().unwrap() } // Take field pub fn take_unit(&mut self) -> UnitInfo { self.unit.take().unwrap_or_else(|| UnitInfo::new()) } pub fn get_unit(&self) -> &UnitInfo { self.unit.as_ref().unwrap_or_else(|| UnitInfo::default_instance()) } fn get_unit_for_reflect(&self) -> &::protobuf::SingularPtrField<UnitInfo> { &self.unit } fn mut_unit_for_reflect(&mut self) -> &mut ::protobuf::SingularPtrField<UnitInfo> { &mut self.unit } } impl ::protobuf::Message for SinglePanel { fn is_initialized(&self) -> bool { for v in &self.unit { if !v.is_initialized() { return false; } }; true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.unit)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(ref v) = self.unit.as_ref() { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(ref v) = self.unit.as_ref() { os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for SinglePanel { fn new() -> SinglePanel { SinglePanel::new() } fn descriptor_static(_: ::std::option::Option<SinglePanel>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<UnitInfo>>( "unit", SinglePanel::get_unit_for_reflect, SinglePanel::mut_unit_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<SinglePanel>( "SinglePanel", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for SinglePanel { fn clear(&mut self) { self.clear_unit(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for SinglePanel { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for SinglePanel { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct MultiPanel { // message fields units: ::protobuf::RepeatedField<UnitInfo>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for MultiPanel {} impl MultiPanel { pub fn new() -> MultiPanel { ::std::default::Default::default() } pub fn default_instance() -> &'static MultiPanel { static mut instance: ::protobuf::lazy::Lazy<MultiPanel> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const MultiPanel, 
}; unsafe { instance.get(MultiPanel::new) } } // repeated .SC2APIProtocol.UnitInfo units = 1; pub fn clear_units(&mut self) { self.units.clear(); } // Param is passed by value, moved pub fn set_units(&mut self, v: ::protobuf::RepeatedField<UnitInfo>) { self.units = v; } // Mutable pointer to the field. pub fn mut_units(&mut self) -> &mut ::protobuf::RepeatedField<UnitInfo> { &mut self.units } // Take field pub fn take_units(&mut self) -> ::protobuf::RepeatedField<UnitInfo> { ::std::mem::replace(&mut self.units, ::protobuf::RepeatedField::new()) } pub fn get_units(&self) -> &[UnitInfo] { &self.units } fn get_units_for_reflect(&self) -> &::protobuf::RepeatedField<UnitInfo> { &self.units } fn mut_units_for_reflect(&mut self) -> &mut ::protobuf::RepeatedField<UnitInfo> { &mut self.units } } impl ::protobuf::Message for MultiPanel { fn is_initialized(&self) -> bool { for v in &self.units { if !v.is_initialized() { return false; } }; true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.units)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; for value in &self.units { let len = value.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }; my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { for v in &self.units { os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }; os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for MultiPanel { fn new() -> MultiPanel { MultiPanel::new() } fn descriptor_static(_: ::std::option::Option<MultiPanel>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<UnitInfo>>( "units", MultiPanel::get_units_for_reflect, MultiPanel::mut_units_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<MultiPanel>( "MultiPanel", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for 
MultiPanel { fn clear(&mut self) { self.clear_units(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for MultiPanel { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for MultiPanel { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct CargoPanel { // message fields unit: ::protobuf::SingularPtrField<UnitInfo>, passengers: ::protobuf::RepeatedField<UnitInfo>, slots_available: ::std::option::Option<i32>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for CargoPanel {} impl CargoPanel { pub fn new() -> CargoPanel { ::std::default::Default::default() } pub fn default_instance() -> &'static CargoPanel { static mut instance: ::protobuf::lazy::Lazy<CargoPanel> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const CargoPanel, }; unsafe { instance.get(CargoPanel::new) } } // optional .SC2APIProtocol.UnitInfo unit = 1; pub fn clear_unit(&mut self) { self.unit.clear(); } pub fn has_unit(&self) -> bool { self.unit.is_some() } // Param is passed by value, moved pub fn set_unit(&mut self, v: UnitInfo) { self.unit = ::protobuf::SingularPtrField::some(v); } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. pub fn mut_unit(&mut self) -> &mut UnitInfo { if self.unit.is_none() { self.unit.set_default(); } self.unit.as_mut().unwrap() } // Take field pub fn take_unit(&mut self) -> UnitInfo { self.unit.take().unwrap_or_else(|| UnitInfo::new()) } pub fn get_unit(&self) -> &UnitInfo { self.unit.as_ref().unwrap_or_else(|| UnitInfo::default_instance()) } fn get_unit_for_reflect(&self) -> &::protobuf::SingularPtrField<UnitInfo> { &self.unit } fn mut_unit_for_reflect(&mut self) -> &mut ::protobuf::SingularPtrField<UnitInfo> { &mut self.unit } // repeated .SC2APIProtocol.UnitInfo passengers = 2; pub fn clear_passengers(&mut self) { self.passengers.clear(); } // Param is passed by value, moved pub fn set_passengers(&mut self, v: ::protobuf::RepeatedField<UnitInfo>) { self.passengers = v; } // Mutable pointer to the field. 
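// Note (added, not generated): for repeated fields this hands back the
// backing RepeatedField itself, so callers can push UnitInfo entries in
// place instead of rebuilding the whole list through set_passengers.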
pub fn mut_passengers(&mut self) -> &mut ::protobuf::RepeatedField<UnitInfo> { &mut self.passengers } // Take field pub fn take_passengers(&mut self) -> ::protobuf::RepeatedField<UnitInfo> { ::std::mem::replace(&mut self.passengers, ::protobuf::RepeatedField::new()) } pub fn get_passengers(&self) -> &[UnitInfo] { &self.passengers } fn get_passengers_for_reflect(&self) -> &::protobuf::RepeatedField<UnitInfo> { &self.passengers } fn mut_passengers_for_reflect(&mut self) -> &mut ::protobuf::RepeatedField<UnitInfo> { &mut self.passengers } // optional int32 slots_available = 3; pub fn clear_slots_available(&mut self) { self.slots_available = ::std::option::Option::None; } pub fn has_slots_available(&self) -> bool { self.slots_available.is_some() } // Param is passed by value, moved pub fn set_slots_available(&mut self, v: i32) { self.slots_available = ::std::option::Option::Some(v); } pub fn get_slots_available(&self) -> i32 { self.slots_available.unwrap_or(0) } fn get_slots_available_for_reflect(&self) -> &::std::option::Option<i32> { &self.slots_available } fn mut_slots_available_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.slots_available } } impl ::protobuf::Message for CargoPanel { fn is_initialized(&self) -> bool { for v in &self.unit { if !v.is_initialized() { return false; } }; for v in &self.passengers { if !v.is_initialized() { return false; } }; true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.unit)?; }, 2 => { ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.passengers)?; }, 3 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.slots_available = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(ref v) = self.unit.as_ref() { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; } for value in &self.passengers { let len = value.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }; if let Some(v) = self.slots_available { my_size += ::protobuf::rt::value_size(3, v, ::protobuf::wire_format::WireTypeVarint); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(ref v) = self.unit.as_ref() { os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; } for v in &self.passengers { os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }; if let Some(v) = self.slots_available { os.write_int32(3, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> 
&::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for CargoPanel { fn new() -> CargoPanel { CargoPanel::new() } fn descriptor_static(_: ::std::option::Option<CargoPanel>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<UnitInfo>>( "unit", CargoPanel::get_unit_for_reflect, CargoPanel::mut_unit_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<UnitInfo>>( "passengers", CargoPanel::get_passengers_for_reflect, CargoPanel::mut_passengers_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "slots_available", CargoPanel::get_slots_available_for_reflect, CargoPanel::mut_slots_available_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<CargoPanel>( "CargoPanel", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for CargoPanel { fn clear(&mut self) { self.clear_unit(); self.clear_passengers(); self.clear_slots_available(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for CargoPanel { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for CargoPanel { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ProductionPanel { // message fields unit: ::protobuf::SingularPtrField<UnitInfo>, build_queue: ::protobuf::RepeatedField<UnitInfo>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ProductionPanel {} impl ProductionPanel { pub fn new() -> ProductionPanel { ::std::default::Default::default() } pub fn default_instance() -> &'static ProductionPanel { static mut instance: ::protobuf::lazy::Lazy<ProductionPanel> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ProductionPanel, }; unsafe { instance.get(ProductionPanel::new) } } // optional .SC2APIProtocol.UnitInfo unit = 1; pub fn clear_unit(&mut self) { self.unit.clear(); } pub fn has_unit(&self) -> bool { self.unit.is_some() } // Param is passed by value, moved pub fn set_unit(&mut self, v: UnitInfo) { self.unit = ::protobuf::SingularPtrField::some(v); } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. 
pub fn mut_unit(&mut self) -> &mut UnitInfo { if self.unit.is_none() { self.unit.set_default(); } self.unit.as_mut().unwrap() } // Take field pub fn take_unit(&mut self) -> UnitInfo { self.unit.take().unwrap_or_else(|| UnitInfo::new()) } pub fn get_unit(&self) -> &UnitInfo { self.unit.as_ref().unwrap_or_else(|| UnitInfo::default_instance()) } fn get_unit_for_reflect(&self) -> &::protobuf::SingularPtrField<UnitInfo> { &self.unit } fn mut_unit_for_reflect(&mut self) -> &mut ::protobuf::SingularPtrField<UnitInfo> { &mut self.unit } // repeated .SC2APIProtocol.UnitInfo build_queue = 2; pub fn clear_build_queue(&mut self) { self.build_queue.clear(); } // Param is passed by value, moved pub fn set_build_queue(&mut self, v: ::protobuf::RepeatedField<UnitInfo>) { self.build_queue = v; } // Mutable pointer to the field. pub fn mut_build_queue(&mut self) -> &mut ::protobuf::RepeatedField<UnitInfo> { &mut self.build_queue } // Take field pub fn take_build_queue(&mut self) -> ::protobuf::RepeatedField<UnitInfo> { ::std::mem::replace(&mut self.build_queue, ::protobuf::RepeatedField::new()) } pub fn get_build_queue(&self) -> &[UnitInfo] { &self.build_queue } fn get_build_queue_for_reflect(&self) -> &::protobuf::RepeatedField<UnitInfo> { &self.build_queue } fn mut_build_queue_for_reflect(&mut self) -> &mut ::protobuf::RepeatedField<UnitInfo> { &mut self.build_queue } } impl ::protobuf::Message for ProductionPanel { fn is_initialized(&self) -> bool { for v in &self.unit { if !v.is_initialized() { return false; } }; for v in &self.build_queue { if !v.is_initialized() { return false; } }; true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.unit)?; }, 2 => { ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.build_queue)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(ref v) = self.unit.as_ref() { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; } for value in &self.build_queue { let len = value.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }; my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(ref v) = self.unit.as_ref() { os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; } for v in &self.build_queue { os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }; os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut 
::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ProductionPanel { fn new() -> ProductionPanel { ProductionPanel::new() } fn descriptor_static(_: ::std::option::Option<ProductionPanel>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<UnitInfo>>( "unit", ProductionPanel::get_unit_for_reflect, ProductionPanel::mut_unit_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<UnitInfo>>( "build_queue", ProductionPanel::get_build_queue_for_reflect, ProductionPanel::mut_build_queue_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ProductionPanel>( "ProductionPanel", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ProductionPanel { fn clear(&mut self) { self.clear_unit(); self.clear_build_queue(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ProductionPanel { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ProductionPanel { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ActionUI { // message oneof groups action: ::std::option::Option<ActionUI_oneof_action>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionUI {} #[derive(Clone,PartialEq)] pub enum ActionUI_oneof_action { control_group(ActionControlGroup), select_army(ActionSelectArmy), select_warp_gates(ActionSelectWarpGates), select_larva(ActionSelectLarva), select_idle_worker(ActionSelectIdleWorker), multi_panel(ActionMultiPanel), cargo_panel(ActionCargoPanelUnload), production_panel(ActionProductionPanelRemoveFromQueue), toggle_autocast(ActionToggleAutocast), } impl ActionUI { pub fn new() -> ActionUI { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionUI { static mut instance: ::protobuf::lazy::Lazy<ActionUI> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionUI, }; unsafe { instance.get(ActionUI::new) } } // optional .SC2APIProtocol.ActionControlGroup control_group = 1; pub fn clear_control_group(&mut self) { self.action = ::std::option::Option::None; } pub fn has_control_group(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::control_group(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_control_group(&mut self, v: ActionControlGroup) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::control_group(v)) } // Mutable pointer to the field. 
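// Note (added, not generated): if the oneof currently holds a different
// variant (or none at all), it is first replaced with a default
// ActionControlGroup, mirroring the lazy initialization of singular
// message fields; the returned reference then points into the oneof.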
pub fn mut_control_group(&mut self) -> &mut ActionControlGroup { if let ::std::option::Option::Some(ActionUI_oneof_action::control_group(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::control_group(ActionControlGroup::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::control_group(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_control_group(&mut self) -> ActionControlGroup { if self.has_control_group() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::control_group(v)) => v, _ => panic!(), } } else { ActionControlGroup::new() } } pub fn get_control_group(&self) -> &ActionControlGroup { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::control_group(ref v)) => v, _ => ActionControlGroup::default_instance(), } } // optional .SC2APIProtocol.ActionSelectArmy select_army = 2; pub fn clear_select_army(&mut self) { self.action = ::std::option::Option::None; } pub fn has_select_army(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_army(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_select_army(&mut self, v: ActionSelectArmy) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_army(v)) } // Mutable pointer to the field. pub fn mut_select_army(&mut self) -> &mut ActionSelectArmy { if let ::std::option::Option::Some(ActionUI_oneof_action::select_army(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_army(ActionSelectArmy::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_army(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_select_army(&mut self) -> ActionSelectArmy { if self.has_select_army() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::select_army(v)) => v, _ => panic!(), } } else { ActionSelectArmy::new() } } pub fn get_select_army(&self) -> &ActionSelectArmy { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_army(ref v)) => v, _ => ActionSelectArmy::default_instance(), } } // optional .SC2APIProtocol.ActionSelectWarpGates select_warp_gates = 3; pub fn clear_select_warp_gates(&mut self) { self.action = ::std::option::Option::None; } pub fn has_select_warp_gates(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_warp_gates(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_select_warp_gates(&mut self, v: ActionSelectWarpGates) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_warp_gates(v)) } // Mutable pointer to the field. 
pub fn mut_select_warp_gates(&mut self) -> &mut ActionSelectWarpGates { if let ::std::option::Option::Some(ActionUI_oneof_action::select_warp_gates(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_warp_gates(ActionSelectWarpGates::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_warp_gates(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_select_warp_gates(&mut self) -> ActionSelectWarpGates { if self.has_select_warp_gates() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::select_warp_gates(v)) => v, _ => panic!(), } } else { ActionSelectWarpGates::new() } } pub fn get_select_warp_gates(&self) -> &ActionSelectWarpGates { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_warp_gates(ref v)) => v, _ => ActionSelectWarpGates::default_instance(), } } // optional .SC2APIProtocol.ActionSelectLarva select_larva = 4; pub fn clear_select_larva(&mut self) { self.action = ::std::option::Option::None; } pub fn has_select_larva(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_larva(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_select_larva(&mut self, v: ActionSelectLarva) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_larva(v)) } // Mutable pointer to the field. pub fn mut_select_larva(&mut self) -> &mut ActionSelectLarva { if let ::std::option::Option::Some(ActionUI_oneof_action::select_larva(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_larva(ActionSelectLarva::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_larva(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_select_larva(&mut self) -> ActionSelectLarva { if self.has_select_larva() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::select_larva(v)) => v, _ => panic!(), } } else { ActionSelectLarva::new() } } pub fn get_select_larva(&self) -> &ActionSelectLarva { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_larva(ref v)) => v, _ => ActionSelectLarva::default_instance(), } } // optional .SC2APIProtocol.ActionSelectIdleWorker select_idle_worker = 5; pub fn clear_select_idle_worker(&mut self) { self.action = ::std::option::Option::None; } pub fn has_select_idle_worker(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_idle_worker(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_select_idle_worker(&mut self, v: ActionSelectIdleWorker) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_idle_worker(v)) } // Mutable pointer to the field. 
pub fn mut_select_idle_worker(&mut self) -> &mut ActionSelectIdleWorker { if let ::std::option::Option::Some(ActionUI_oneof_action::select_idle_worker(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_idle_worker(ActionSelectIdleWorker::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_idle_worker(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_select_idle_worker(&mut self) -> ActionSelectIdleWorker { if self.has_select_idle_worker() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::select_idle_worker(v)) => v, _ => panic!(), } } else { ActionSelectIdleWorker::new() } } pub fn get_select_idle_worker(&self) -> &ActionSelectIdleWorker { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::select_idle_worker(ref v)) => v, _ => ActionSelectIdleWorker::default_instance(), } } // optional .SC2APIProtocol.ActionMultiPanel multi_panel = 6; pub fn clear_multi_panel(&mut self) { self.action = ::std::option::Option::None; } pub fn has_multi_panel(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::multi_panel(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_multi_panel(&mut self, v: ActionMultiPanel) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::multi_panel(v)) } // Mutable pointer to the field. pub fn mut_multi_panel(&mut self) -> &mut ActionMultiPanel { if let ::std::option::Option::Some(ActionUI_oneof_action::multi_panel(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::multi_panel(ActionMultiPanel::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::multi_panel(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_multi_panel(&mut self) -> ActionMultiPanel { if self.has_multi_panel() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::multi_panel(v)) => v, _ => panic!(), } } else { ActionMultiPanel::new() } } pub fn get_multi_panel(&self) -> &ActionMultiPanel { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::multi_panel(ref v)) => v, _ => ActionMultiPanel::default_instance(), } } // optional .SC2APIProtocol.ActionCargoPanelUnload cargo_panel = 7; pub fn clear_cargo_panel(&mut self) { self.action = ::std::option::Option::None; } pub fn has_cargo_panel(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::cargo_panel(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_cargo_panel(&mut self, v: ActionCargoPanelUnload) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::cargo_panel(v)) } // Mutable pointer to the field. 
pub fn mut_cargo_panel(&mut self) -> &mut ActionCargoPanelUnload { if let ::std::option::Option::Some(ActionUI_oneof_action::cargo_panel(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::cargo_panel(ActionCargoPanelUnload::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::cargo_panel(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_cargo_panel(&mut self) -> ActionCargoPanelUnload { if self.has_cargo_panel() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::cargo_panel(v)) => v, _ => panic!(), } } else { ActionCargoPanelUnload::new() } } pub fn get_cargo_panel(&self) -> &ActionCargoPanelUnload { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::cargo_panel(ref v)) => v, _ => ActionCargoPanelUnload::default_instance(), } } // optional .SC2APIProtocol.ActionProductionPanelRemoveFromQueue production_panel = 8; pub fn clear_production_panel(&mut self) { self.action = ::std::option::Option::None; } pub fn has_production_panel(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::production_panel(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_production_panel(&mut self, v: ActionProductionPanelRemoveFromQueue) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::production_panel(v)) } // Mutable pointer to the field. pub fn mut_production_panel(&mut self) -> &mut ActionProductionPanelRemoveFromQueue { if let ::std::option::Option::Some(ActionUI_oneof_action::production_panel(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::production_panel(ActionProductionPanelRemoveFromQueue::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::production_panel(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_production_panel(&mut self) -> ActionProductionPanelRemoveFromQueue { if self.has_production_panel() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::production_panel(v)) => v, _ => panic!(), } } else { ActionProductionPanelRemoveFromQueue::new() } } pub fn get_production_panel(&self) -> &ActionProductionPanelRemoveFromQueue { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::production_panel(ref v)) => v, _ => ActionProductionPanelRemoveFromQueue::default_instance(), } } // optional .SC2APIProtocol.ActionToggleAutocast toggle_autocast = 9; pub fn clear_toggle_autocast(&mut self) { self.action = ::std::option::Option::None; } pub fn has_toggle_autocast(&self) -> bool { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::toggle_autocast(..)) => true, _ => false, } } // Param is passed by value, moved pub fn set_toggle_autocast(&mut self, v: ActionToggleAutocast) { self.action = ::std::option::Option::Some(ActionUI_oneof_action::toggle_autocast(v)) } // Mutable pointer to the field. 
pub fn mut_toggle_autocast(&mut self) -> &mut ActionToggleAutocast { if let ::std::option::Option::Some(ActionUI_oneof_action::toggle_autocast(_)) = self.action { } else { self.action = ::std::option::Option::Some(ActionUI_oneof_action::toggle_autocast(ActionToggleAutocast::new())); } match self.action { ::std::option::Option::Some(ActionUI_oneof_action::toggle_autocast(ref mut v)) => v, _ => panic!(), } } // Take field pub fn take_toggle_autocast(&mut self) -> ActionToggleAutocast { if self.has_toggle_autocast() { match self.action.take() { ::std::option::Option::Some(ActionUI_oneof_action::toggle_autocast(v)) => v, _ => panic!(), } } else { ActionToggleAutocast::new() } } pub fn get_toggle_autocast(&self) -> &ActionToggleAutocast { match self.action { ::std::option::Option::Some(ActionUI_oneof_action::toggle_autocast(ref v)) => v, _ => ActionToggleAutocast::default_instance(), } } } impl ::protobuf::Message for ActionUI { fn is_initialized(&self) -> bool { if let Some(ActionUI_oneof_action::control_group(ref v)) = self.action { if !v.is_initialized() { return false; } } if let Some(ActionUI_oneof_action::select_army(ref v)) = self.action { if !v.is_initialized() { return false; } } if let Some(ActionUI_oneof_action::select_warp_gates(ref v)) = self.action { if !v.is_initialized() { return false; } } if let Some(ActionUI_oneof_action::select_larva(ref v)) = self.action { if !v.is_initialized() { return false; } } if let Some(ActionUI_oneof_action::select_idle_worker(ref v)) = self.action { if !v.is_initialized() { return false; } } if let Some(ActionUI_oneof_action::multi_panel(ref v)) = self.action { if !v.is_initialized() { return false; } } if let Some(ActionUI_oneof_action::cargo_panel(ref v)) = self.action { if !v.is_initialized() { return false; } } if let Some(ActionUI_oneof_action::production_panel(ref v)) = self.action { if !v.is_initialized() { return false; } } if let Some(ActionUI_oneof_action::toggle_autocast(ref v)) = self.action { if !v.is_initialized() { return false; } } true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::control_group(is.read_message()?)); }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_army(is.read_message()?)); }, 3 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_warp_gates(is.read_message()?)); }, 4 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_larva(is.read_message()?)); }, 5 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::select_idle_worker(is.read_message()?)); }, 6 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::multi_panel(is.read_message()?)); }, 7 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::cargo_panel(is.read_message()?)); }, 8 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::production_panel(is.read_message()?)); }, 9 => { if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } self.action = ::std::option::Option::Some(ActionUI_oneof_action::toggle_autocast(is.read_message()?)); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let ::std::option::Option::Some(ref v) = self.action { match v { &ActionUI_oneof_action::control_group(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ActionUI_oneof_action::select_army(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ActionUI_oneof_action::select_warp_gates(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ActionUI_oneof_action::select_larva(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ActionUI_oneof_action::select_idle_worker(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, 
&ActionUI_oneof_action::multi_panel(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ActionUI_oneof_action::cargo_panel(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ActionUI_oneof_action::production_panel(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, &ActionUI_oneof_action::toggle_autocast(ref v) => { let len = v.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }, }; } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let ::std::option::Option::Some(ref v) = self.action { match v { &ActionUI_oneof_action::control_group(ref v) => { os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ActionUI_oneof_action::select_army(ref v) => { os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ActionUI_oneof_action::select_warp_gates(ref v) => { os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ActionUI_oneof_action::select_larva(ref v) => { os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ActionUI_oneof_action::select_idle_worker(ref v) => { os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ActionUI_oneof_action::multi_panel(ref v) => { os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ActionUI_oneof_action::cargo_panel(ref v) => { os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ActionUI_oneof_action::production_panel(ref v) => { os.write_tag(8, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, &ActionUI_oneof_action::toggle_autocast(ref v) => { os.write_tag(9, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }, }; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionUI { fn new() -> ActionUI { ActionUI::new() } fn descriptor_static(_: ::std::option::Option<ActionUI>) 
-> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionControlGroup>( "control_group", ActionUI::has_control_group, ActionUI::get_control_group, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionSelectArmy>( "select_army", ActionUI::has_select_army, ActionUI::get_select_army, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionSelectWarpGates>( "select_warp_gates", ActionUI::has_select_warp_gates, ActionUI::get_select_warp_gates, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionSelectLarva>( "select_larva", ActionUI::has_select_larva, ActionUI::get_select_larva, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionSelectIdleWorker>( "select_idle_worker", ActionUI::has_select_idle_worker, ActionUI::get_select_idle_worker, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionMultiPanel>( "multi_panel", ActionUI::has_multi_panel, ActionUI::get_multi_panel, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionCargoPanelUnload>( "cargo_panel", ActionUI::has_cargo_panel, ActionUI::get_cargo_panel, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionProductionPanelRemoveFromQueue>( "production_panel", ActionUI::has_production_panel, ActionUI::get_production_panel, )); fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, ActionToggleAutocast>( "toggle_autocast", ActionUI::has_toggle_autocast, ActionUI::get_toggle_autocast, )); ::protobuf::reflect::MessageDescriptor::new::<ActionUI>( "ActionUI", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionUI { fn clear(&mut self) { self.clear_control_group(); self.clear_select_army(); self.clear_select_warp_gates(); self.clear_select_larva(); self.clear_select_idle_worker(); self.clear_multi_panel(); self.clear_cargo_panel(); self.clear_production_panel(); self.clear_toggle_autocast(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionUI { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionUI { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ActionControlGroup { // message fields action: ::std::option::Option<ActionControlGroup_ControlGroupAction>, control_group_index: ::std::option::Option<u32>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionControlGroup {} impl ActionControlGroup { pub fn new() -> ActionControlGroup { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionControlGroup { static mut instance: ::protobuf::lazy::Lazy<ActionControlGroup> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionControlGroup, }; unsafe { 
instance.get(ActionControlGroup::new) } } // optional .SC2APIProtocol.ActionControlGroup.ControlGroupAction action = 1; pub fn clear_action(&mut self) { self.action = ::std::option::Option::None; } pub fn has_action(&self) -> bool { self.action.is_some() } // Param is passed by value, moved pub fn set_action(&mut self, v: ActionControlGroup_ControlGroupAction) { self.action = ::std::option::Option::Some(v); } pub fn get_action(&self) -> ActionControlGroup_ControlGroupAction { self.action.unwrap_or(ActionControlGroup_ControlGroupAction::Recall) } fn get_action_for_reflect(&self) -> &::std::option::Option<ActionControlGroup_ControlGroupAction> { &self.action } fn mut_action_for_reflect(&mut self) -> &mut ::std::option::Option<ActionControlGroup_ControlGroupAction> { &mut self.action } // optional uint32 control_group_index = 2; pub fn clear_control_group_index(&mut self) { self.control_group_index = ::std::option::Option::None; } pub fn has_control_group_index(&self) -> bool { self.control_group_index.is_some() } // Param is passed by value, moved pub fn set_control_group_index(&mut self, v: u32) { self.control_group_index = ::std::option::Option::Some(v); } pub fn get_control_group_index(&self) -> u32 { self.control_group_index.unwrap_or(0) } fn get_control_group_index_for_reflect(&self) -> &::std::option::Option<u32> { &self.control_group_index } fn mut_control_group_index_for_reflect(&mut self) -> &mut ::std::option::Option<u32> { &mut self.control_group_index } } impl ::protobuf::Message for ActionControlGroup { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_enum()?; self.action = ::std::option::Option::Some(tmp); }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_uint32()?; self.control_group_index = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.action { my_size += ::protobuf::rt::enum_size(1, v); } if let Some(v) = self.control_group_index { my_size += ::protobuf::rt::value_size(2, v, ::protobuf::wire_format::WireTypeVarint); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.action { os.write_enum(1, v.value())?; } if let Some(v) = self.control_group_index { os.write_uint32(2, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut 
::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionControlGroup { fn new() -> ActionControlGroup { ActionControlGroup::new() } fn descriptor_static(_: ::std::option::Option<ActionControlGroup>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeEnum<ActionControlGroup_ControlGroupAction>>( "action", ActionControlGroup::get_action_for_reflect, ActionControlGroup::mut_action_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeUint32>( "control_group_index", ActionControlGroup::get_control_group_index_for_reflect, ActionControlGroup::mut_control_group_index_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ActionControlGroup>( "ActionControlGroup", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionControlGroup { fn clear(&mut self) { self.clear_action(); self.clear_control_group_index(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionControlGroup { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionControlGroup { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(Clone,PartialEq,Eq,Debug,Hash)] pub enum ActionControlGroup_ControlGroupAction { Recall = 1, Set = 2, Append = 3, SetAndSteal = 4, AppendAndSteal = 5, } impl ::protobuf::ProtobufEnum for ActionControlGroup_ControlGroupAction { fn value(&self) -> i32 { *self as i32 } fn from_i32(value: i32) -> ::std::option::Option<ActionControlGroup_ControlGroupAction> { match value { 1 => ::std::option::Option::Some(ActionControlGroup_ControlGroupAction::Recall), 2 => ::std::option::Option::Some(ActionControlGroup_ControlGroupAction::Set), 3 => ::std::option::Option::Some(ActionControlGroup_ControlGroupAction::Append), 4 => ::std::option::Option::Some(ActionControlGroup_ControlGroupAction::SetAndSteal), 5 => ::std::option::Option::Some(ActionControlGroup_ControlGroupAction::AppendAndSteal), _ => ::std::option::Option::None } } fn values() -> &'static [Self] { static values: &'static [ActionControlGroup_ControlGroupAction] = &[ ActionControlGroup_ControlGroupAction::Recall, ActionControlGroup_ControlGroupAction::Set, ActionControlGroup_ControlGroupAction::Append, ActionControlGroup_ControlGroupAction::SetAndSteal, ActionControlGroup_ControlGroupAction::AppendAndSteal, ]; values } fn enum_descriptor_static(_: ::std::option::Option<ActionControlGroup_ControlGroupAction>) -> &'static ::protobuf::reflect::EnumDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::EnumDescriptor, }; unsafe { descriptor.get(|| { ::protobuf::reflect::EnumDescriptor::new("ActionControlGroup_ControlGroupAction", file_descriptor_proto()) }) } } } 
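// --- Illustrative usage sketch (added; not part of the generated output) ---
// A minimal round-trip example for the ActionUI oneof and the
// ActionControlGroup accessors generated above. The module and test names
// are hypothetical; `write_to_bytes` and `protobuf::parse_from_bytes` are
// the rust-protobuf 1.x API this file is generated against.
#[cfg(test)]
mod action_ui_usage_sketch {
    use super::*;
    use protobuf::Message;

    #[test]
    fn control_group_round_trip() {
        // Build the nested payload: "set control group 1".
        let mut group = ActionControlGroup::new();
        group.set_action(ActionControlGroup_ControlGroupAction::Set);
        group.set_control_group_index(1);

        // Installing it selects the `control_group` arm of the oneof,
        // replacing whatever arm was set before.
        let mut ui = ActionUI::new();
        ui.set_control_group(group);
        assert!(ui.has_control_group());
        assert_eq!(ui.get_control_group().get_control_group_index(), 1);

        // Serialize, then parse back and confirm the arm survived.
        let bytes = ui.write_to_bytes().expect("serialize ActionUI");
        let parsed = ::protobuf::parse_from_bytes::<ActionUI>(&bytes).expect("parse ActionUI");
        assert!(parsed.has_control_group());
        assert_eq!(parsed.get_control_group().get_control_group_index(), 1);
    }
}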
impl ::std::marker::Copy for ActionControlGroup_ControlGroupAction { } impl ::protobuf::reflect::ProtobufValue for ActionControlGroup_ControlGroupAction { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Enum(self.descriptor()) } } #[derive(PartialEq,Clone,Default)] pub struct ActionSelectArmy { // message fields selection_add: ::std::option::Option<bool>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionSelectArmy {} impl ActionSelectArmy { pub fn new() -> ActionSelectArmy { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionSelectArmy { static mut instance: ::protobuf::lazy::Lazy<ActionSelectArmy> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionSelectArmy, }; unsafe { instance.get(ActionSelectArmy::new) } } // optional bool selection_add = 1; pub fn clear_selection_add(&mut self) { self.selection_add = ::std::option::Option::None; } pub fn has_selection_add(&self) -> bool { self.selection_add.is_some() } // Param is passed by value, moved pub fn set_selection_add(&mut self, v: bool) { self.selection_add = ::std::option::Option::Some(v); } pub fn get_selection_add(&self) -> bool { self.selection_add.unwrap_or(false) } fn get_selection_add_for_reflect(&self) -> &::std::option::Option<bool> { &self.selection_add } fn mut_selection_add_for_reflect(&mut self) -> &mut ::std::option::Option<bool> { &mut self.selection_add } } impl ::protobuf::Message for ActionSelectArmy { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_bool()?; self.selection_add = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.selection_add { my_size += 2; } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.selection_add { os.write_bool(1, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionSelectArmy { fn new() -> ActionSelectArmy { ActionSelectArmy::new() } fn descriptor_static(_: ::std::option::Option<ActionSelectArmy>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeBool>( "selection_add", ActionSelectArmy::get_selection_add_for_reflect, ActionSelectArmy::mut_selection_add_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ActionSelectArmy>( "ActionSelectArmy", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionSelectArmy { fn clear(&mut self) { self.clear_selection_add(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionSelectArmy { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionSelectArmy { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ActionSelectWarpGates { // message fields selection_add: ::std::option::Option<bool>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionSelectWarpGates {} impl ActionSelectWarpGates { pub fn new() -> ActionSelectWarpGates { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionSelectWarpGates { static mut instance: ::protobuf::lazy::Lazy<ActionSelectWarpGates> = ::protobuf::lazy::Lazy { 
lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionSelectWarpGates, }; unsafe { instance.get(ActionSelectWarpGates::new) } } // optional bool selection_add = 1; pub fn clear_selection_add(&mut self) { self.selection_add = ::std::option::Option::None; } pub fn has_selection_add(&self) -> bool { self.selection_add.is_some() } // Param is passed by value, moved pub fn set_selection_add(&mut self, v: bool) { self.selection_add = ::std::option::Option::Some(v); } pub fn get_selection_add(&self) -> bool { self.selection_add.unwrap_or(false) } fn get_selection_add_for_reflect(&self) -> &::std::option::Option<bool> { &self.selection_add } fn mut_selection_add_for_reflect(&mut self) -> &mut ::std::option::Option<bool> { &mut self.selection_add } } impl ::protobuf::Message for ActionSelectWarpGates { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_bool()?; self.selection_add = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.selection_add { my_size += 2; } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.selection_add { os.write_bool(1, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionSelectWarpGates { fn new() -> ActionSelectWarpGates { ActionSelectWarpGates::new() } fn descriptor_static(_: ::std::option::Option<ActionSelectWarpGates>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeBool>( "selection_add", ActionSelectWarpGates::get_selection_add_for_reflect, ActionSelectWarpGates::mut_selection_add_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ActionSelectWarpGates>( "ActionSelectWarpGates", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionSelectWarpGates { fn clear(&mut self) { 
self.clear_selection_add(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionSelectWarpGates { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionSelectWarpGates { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ActionSelectLarva { // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionSelectLarva {} impl ActionSelectLarva { pub fn new() -> ActionSelectLarva { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionSelectLarva { static mut instance: ::protobuf::lazy::Lazy<ActionSelectLarva> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionSelectLarva, }; unsafe { instance.get(ActionSelectLarva::new) } } } impl ::protobuf::Message for ActionSelectLarva { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionSelectLarva { fn new() -> ActionSelectLarva { ActionSelectLarva::new() } fn descriptor_static(_: ::std::option::Option<ActionSelectLarva>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let fields = ::std::vec::Vec::new(); ::protobuf::reflect::MessageDescriptor::new::<ActionSelectLarva>( "ActionSelectLarva", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionSelectLarva { fn clear(&mut self) { self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionSelectLarva { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionSelectLarva { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { 
::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ActionSelectIdleWorker { // message fields field_type: ::std::option::Option<ActionSelectIdleWorker_Type>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionSelectIdleWorker {} impl ActionSelectIdleWorker { pub fn new() -> ActionSelectIdleWorker { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionSelectIdleWorker { static mut instance: ::protobuf::lazy::Lazy<ActionSelectIdleWorker> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionSelectIdleWorker, }; unsafe { instance.get(ActionSelectIdleWorker::new) } } // optional .SC2APIProtocol.ActionSelectIdleWorker.Type type = 1; pub fn clear_field_type(&mut self) { self.field_type = ::std::option::Option::None; } pub fn has_field_type(&self) -> bool { self.field_type.is_some() } // Param is passed by value, moved pub fn set_field_type(&mut self, v: ActionSelectIdleWorker_Type) { self.field_type = ::std::option::Option::Some(v); } pub fn get_field_type(&self) -> ActionSelectIdleWorker_Type { self.field_type.unwrap_or(ActionSelectIdleWorker_Type::Set) } fn get_field_type_for_reflect(&self) -> &::std::option::Option<ActionSelectIdleWorker_Type> { &self.field_type } fn mut_field_type_for_reflect(&mut self) -> &mut ::std::option::Option<ActionSelectIdleWorker_Type> { &mut self.field_type } } impl ::protobuf::Message for ActionSelectIdleWorker { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_enum()?; self.field_type = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.field_type { my_size += ::protobuf::rt::enum_size(1, v); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.field_type { os.write_enum(1, v.value())?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionSelectIdleWorker { fn new() -> ActionSelectIdleWorker { ActionSelectIdleWorker::new() } fn descriptor_static(_: ::std::option::Option<ActionSelectIdleWorker>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeEnum<ActionSelectIdleWorker_Type>>( "type", ActionSelectIdleWorker::get_field_type_for_reflect, ActionSelectIdleWorker::mut_field_type_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ActionSelectIdleWorker>( "ActionSelectIdleWorker", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionSelectIdleWorker { fn clear(&mut self) { self.clear_field_type(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionSelectIdleWorker { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionSelectIdleWorker { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(Clone,PartialEq,Eq,Debug,Hash)] pub enum ActionSelectIdleWorker_Type { Set = 1, Add = 2, All = 3, AddAll = 4, } impl ::protobuf::ProtobufEnum for ActionSelectIdleWorker_Type { fn value(&self) -> i32 { *self as i32 } fn from_i32(value: i32) -> ::std::option::Option<ActionSelectIdleWorker_Type> { match value { 1 => ::std::option::Option::Some(ActionSelectIdleWorker_Type::Set), 2 => ::std::option::Option::Some(ActionSelectIdleWorker_Type::Add), 3 => 
::std::option::Option::Some(ActionSelectIdleWorker_Type::All), 4 => ::std::option::Option::Some(ActionSelectIdleWorker_Type::AddAll), _ => ::std::option::Option::None } } fn values() -> &'static [Self] { static values: &'static [ActionSelectIdleWorker_Type] = &[ ActionSelectIdleWorker_Type::Set, ActionSelectIdleWorker_Type::Add, ActionSelectIdleWorker_Type::All, ActionSelectIdleWorker_Type::AddAll, ]; values } fn enum_descriptor_static(_: ::std::option::Option<ActionSelectIdleWorker_Type>) -> &'static ::protobuf::reflect::EnumDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::EnumDescriptor, }; unsafe { descriptor.get(|| { ::protobuf::reflect::EnumDescriptor::new("ActionSelectIdleWorker_Type", file_descriptor_proto()) }) } } } impl ::std::marker::Copy for ActionSelectIdleWorker_Type { } impl ::protobuf::reflect::ProtobufValue for ActionSelectIdleWorker_Type { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Enum(self.descriptor()) } } #[derive(PartialEq,Clone,Default)] pub struct ActionMultiPanel { // message fields field_type: ::std::option::Option<ActionMultiPanel_Type>, unit_index: ::std::option::Option<i32>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionMultiPanel {} impl ActionMultiPanel { pub fn new() -> ActionMultiPanel { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionMultiPanel { static mut instance: ::protobuf::lazy::Lazy<ActionMultiPanel> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionMultiPanel, }; unsafe { instance.get(ActionMultiPanel::new) } } // optional .SC2APIProtocol.ActionMultiPanel.Type type = 1; pub fn clear_field_type(&mut self) { self.field_type = ::std::option::Option::None; } pub fn has_field_type(&self) -> bool { self.field_type.is_some() } // Param is passed by value, moved pub fn set_field_type(&mut self, v: ActionMultiPanel_Type) { self.field_type = ::std::option::Option::Some(v); } pub fn get_field_type(&self) -> ActionMultiPanel_Type { self.field_type.unwrap_or(ActionMultiPanel_Type::SingleSelect) } fn get_field_type_for_reflect(&self) -> &::std::option::Option<ActionMultiPanel_Type> { &self.field_type } fn mut_field_type_for_reflect(&mut self) -> &mut ::std::option::Option<ActionMultiPanel_Type> { &mut self.field_type } // optional int32 unit_index = 2; pub fn clear_unit_index(&mut self) { self.unit_index = ::std::option::Option::None; } pub fn has_unit_index(&self) -> bool { self.unit_index.is_some() } // Param is passed by value, moved pub fn set_unit_index(&mut self, v: i32) { self.unit_index = ::std::option::Option::Some(v); } pub fn get_unit_index(&self) -> i32 { self.unit_index.unwrap_or(0) } fn get_unit_index_for_reflect(&self) -> &::std::option::Option<i32> { &self.unit_index } fn mut_unit_index_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.unit_index } } impl ::protobuf::Message for ActionMultiPanel { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_enum()?; self.field_type = ::std::option::Option::Some(tmp); }, 2 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.unit_index = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.field_type { my_size += ::protobuf::rt::enum_size(1, v); } if let Some(v) = self.unit_index { my_size += ::protobuf::rt::value_size(2, v, ::protobuf::wire_format::WireTypeVarint); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.field_type { os.write_enum(1, v.value())?; } if let Some(v) = self.unit_index { os.write_int32(2, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionMultiPanel { fn new() -> ActionMultiPanel { ActionMultiPanel::new() } fn descriptor_static(_: ::std::option::Option<ActionMultiPanel>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeEnum<ActionMultiPanel_Type>>( "type", ActionMultiPanel::get_field_type_for_reflect, ActionMultiPanel::mut_field_type_for_reflect, )); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "unit_index", ActionMultiPanel::get_unit_index_for_reflect, ActionMultiPanel::mut_unit_index_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ActionMultiPanel>( "ActionMultiPanel", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionMultiPanel { fn clear(&mut self) { self.clear_field_type(); self.clear_unit_index(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionMultiPanel { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionMultiPanel { fn as_ref(&self) -> 
::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(Clone,PartialEq,Eq,Debug,Hash)] pub enum ActionMultiPanel_Type { SingleSelect = 1, DeselectUnit = 2, SelectAllOfType = 3, DeselectAllOfType = 4, } impl ::protobuf::ProtobufEnum for ActionMultiPanel_Type { fn value(&self) -> i32 { *self as i32 } fn from_i32(value: i32) -> ::std::option::Option<ActionMultiPanel_Type> { match value { 1 => ::std::option::Option::Some(ActionMultiPanel_Type::SingleSelect), 2 => ::std::option::Option::Some(ActionMultiPanel_Type::DeselectUnit), 3 => ::std::option::Option::Some(ActionMultiPanel_Type::SelectAllOfType), 4 => ::std::option::Option::Some(ActionMultiPanel_Type::DeselectAllOfType), _ => ::std::option::Option::None } } fn values() -> &'static [Self] { static values: &'static [ActionMultiPanel_Type] = &[ ActionMultiPanel_Type::SingleSelect, ActionMultiPanel_Type::DeselectUnit, ActionMultiPanel_Type::SelectAllOfType, ActionMultiPanel_Type::DeselectAllOfType, ]; values } fn enum_descriptor_static(_: ::std::option::Option<ActionMultiPanel_Type>) -> &'static ::protobuf::reflect::EnumDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::EnumDescriptor, }; unsafe { descriptor.get(|| { ::protobuf::reflect::EnumDescriptor::new("ActionMultiPanel_Type", file_descriptor_proto()) }) } } } impl ::std::marker::Copy for ActionMultiPanel_Type { } impl ::protobuf::reflect::ProtobufValue for ActionMultiPanel_Type { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Enum(self.descriptor()) } } #[derive(PartialEq,Clone,Default)] pub struct ActionCargoPanelUnload { // message fields unit_index: ::std::option::Option<i32>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionCargoPanelUnload {} impl ActionCargoPanelUnload { pub fn new() -> ActionCargoPanelUnload { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionCargoPanelUnload { static mut instance: ::protobuf::lazy::Lazy<ActionCargoPanelUnload> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionCargoPanelUnload, }; unsafe { instance.get(ActionCargoPanelUnload::new) } } // optional int32 unit_index = 1; pub fn clear_unit_index(&mut self) { self.unit_index = ::std::option::Option::None; } pub fn has_unit_index(&self) -> bool { self.unit_index.is_some() } // Param is passed by value, moved pub fn set_unit_index(&mut self, v: i32) { self.unit_index = ::std::option::Option::Some(v); } pub fn get_unit_index(&self) -> i32 { self.unit_index.unwrap_or(0) } fn get_unit_index_for_reflect(&self) -> &::std::option::Option<i32> { &self.unit_index } fn mut_unit_index_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.unit_index } } impl ::protobuf::Message for ActionCargoPanelUnload { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.unit_index = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.unit_index { my_size += ::protobuf::rt::value_size(1, v, ::protobuf::wire_format::WireTypeVarint); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.unit_index { os.write_int32(1, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionCargoPanelUnload { fn new() -> ActionCargoPanelUnload { ActionCargoPanelUnload::new() } fn descriptor_static(_: ::std::option::Option<ActionCargoPanelUnload>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "unit_index", ActionCargoPanelUnload::get_unit_index_for_reflect, ActionCargoPanelUnload::mut_unit_index_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ActionCargoPanelUnload>( "ActionCargoPanelUnload", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionCargoPanelUnload { fn clear(&mut self) { self.clear_unit_index(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionCargoPanelUnload { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionCargoPanelUnload { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ActionProductionPanelRemoveFromQueue { // message fields unit_index: ::std::option::Option<i32>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionProductionPanelRemoveFromQueue {} impl ActionProductionPanelRemoveFromQueue { pub fn new() -> ActionProductionPanelRemoveFromQueue { 
::std::default::Default::default() } pub fn default_instance() -> &'static ActionProductionPanelRemoveFromQueue { static mut instance: ::protobuf::lazy::Lazy<ActionProductionPanelRemoveFromQueue> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionProductionPanelRemoveFromQueue, }; unsafe { instance.get(ActionProductionPanelRemoveFromQueue::new) } } // optional int32 unit_index = 1; pub fn clear_unit_index(&mut self) { self.unit_index = ::std::option::Option::None; } pub fn has_unit_index(&self) -> bool { self.unit_index.is_some() } // Param is passed by value, moved pub fn set_unit_index(&mut self, v: i32) { self.unit_index = ::std::option::Option::Some(v); } pub fn get_unit_index(&self) -> i32 { self.unit_index.unwrap_or(0) } fn get_unit_index_for_reflect(&self) -> &::std::option::Option<i32> { &self.unit_index } fn mut_unit_index_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.unit_index } } impl ::protobuf::Message for ActionProductionPanelRemoveFromQueue { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.unit_index = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.unit_index { my_size += ::protobuf::rt::value_size(1, v, ::protobuf::wire_format::WireTypeVarint); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.unit_index { os.write_int32(1, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionProductionPanelRemoveFromQueue { fn new() -> ActionProductionPanelRemoveFromQueue { ActionProductionPanelRemoveFromQueue::new() } fn descriptor_static(_: ::std::option::Option<ActionProductionPanelRemoveFromQueue>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, 
::protobuf::types::ProtobufTypeInt32>( "unit_index", ActionProductionPanelRemoveFromQueue::get_unit_index_for_reflect, ActionProductionPanelRemoveFromQueue::mut_unit_index_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ActionProductionPanelRemoveFromQueue>( "ActionProductionPanelRemoveFromQueue", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionProductionPanelRemoveFromQueue { fn clear(&mut self) { self.clear_unit_index(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionProductionPanelRemoveFromQueue { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionProductionPanelRemoveFromQueue { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ActionToggleAutocast { // message fields ability_id: ::std::option::Option<i32>, // special fields unknown_fields: ::protobuf::UnknownFields, cached_size: ::protobuf::CachedSize, } // see codegen.rs for the explanation why impl Sync explicitly unsafe impl ::std::marker::Sync for ActionToggleAutocast {} impl ActionToggleAutocast { pub fn new() -> ActionToggleAutocast { ::std::default::Default::default() } pub fn default_instance() -> &'static ActionToggleAutocast { static mut instance: ::protobuf::lazy::Lazy<ActionToggleAutocast> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ActionToggleAutocast, }; unsafe { instance.get(ActionToggleAutocast::new) } } // optional int32 ability_id = 1; pub fn clear_ability_id(&mut self) { self.ability_id = ::std::option::Option::None; } pub fn has_ability_id(&self) -> bool { self.ability_id.is_some() } // Param is passed by value, moved pub fn set_ability_id(&mut self, v: i32) { self.ability_id = ::std::option::Option::Some(v); } pub fn get_ability_id(&self) -> i32 { self.ability_id.unwrap_or(0) } fn get_ability_id_for_reflect(&self) -> &::std::option::Option<i32> { &self.ability_id } fn mut_ability_id_for_reflect(&mut self) -> &mut ::std::option::Option<i32> { &mut self.ability_id } } impl ::protobuf::Message for ActionToggleAutocast { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { if wire_type != ::protobuf::wire_format::WireTypeVarint { return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); } let tmp = is.read_int32()?; self.ability_id = ::std::option::Option::Some(tmp); }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if let Some(v) = self.ability_id { my_size += ::protobuf::rt::value_size(1, v, ::protobuf::wire_format::WireTypeVarint); } my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> { if let Some(v) = self.ability_id { os.write_int32(1, v)?; } os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &::std::any::Any { self as &::std::any::Any } fn as_any_mut(&mut self) -> &mut ::std::any::Any { self as &mut ::std::any::Any } fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { ::protobuf::MessageStatic::descriptor_static(None::<Self>) } } impl ::protobuf::MessageStatic for ActionToggleAutocast { fn new() -> ActionToggleAutocast { ActionToggleAutocast::new() } fn descriptor_static(_: ::std::option::Option<ActionToggleAutocast>) -> &'static ::protobuf::reflect::MessageDescriptor { static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::reflect::MessageDescriptor, }; unsafe { descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeInt32>( "ability_id", ActionToggleAutocast::get_ability_id_for_reflect, ActionToggleAutocast::mut_ability_id_for_reflect, )); ::protobuf::reflect::MessageDescriptor::new::<ActionToggleAutocast>( "ActionToggleAutocast", fields, file_descriptor_proto() ) }) } } } impl ::protobuf::Clear for ActionToggleAutocast { fn clear(&mut self) { self.clear_ability_id(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ActionToggleAutocast { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ActionToggleAutocast { fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef { ::protobuf::reflect::ProtobufValueRef::Message(self) } } static file_descriptor_proto_data: &'static [u8] = b"\ \n\x19s2clientprotocol/ui.proto\x12\x0eSC2APIProtocol\"\xb0\x02\n\rObser\ vationUI\x124\n\x06groups\x18\x01\x20\x03(\x0b2\x1c.SC2APIProtocol.Contr\ olGroupR\x06groups\x125\n\x06single\x18\x02\x20\x01(\x0b2\x1b.SC2APIProt\ ocol.SinglePanelH\0R\x06single\x122\n\x05multi\x18\x03\x20\x01(\x0b2\x1a\ .SC2APIProtocol.MultiPanelH\0R\x05multi\x122\n\x05cargo\x18\x04\x20\x01(\ \x0b2\x1a.SC2APIProtocol.CargoPanelH\0R\x05cargo\x12A\n\nproduction\x18\ 
\x05\x20\x01(\x0b2\x1f.SC2APIProtocol.ProductionPanelH\0R\nproductionB\ \x07\n\x05panel\"~\n\x0cControlGroup\x12.\n\x13control_group_index\x18\ \x01\x20\x01(\rR\x11controlGroupIndex\x12(\n\x10leader_unit_type\x18\x02\ \x20\x01(\rR\x0eleaderUnitType\x12\x14\n\x05count\x18\x03\x20\x01(\rR\ \x05count\"\xa6\x02\n\x08UnitInfo\x12\x1b\n\tunit_type\x18\x01\x20\x01(\ \rR\x08unitType\x12'\n\x0fplayer_relative\x18\x02\x20\x01(\rR\x0eplayerR\ elative\x12\x16\n\x06health\x18\x03\x20\x01(\x05R\x06health\x12\x18\n\ \x07shields\x18\x04\x20\x01(\x05R\x07shields\x12\x16\n\x06energy\x18\x05\ \x20\x01(\x05R\x06energy\x122\n\x15transport_slots_taken\x18\x06\x20\x01\ (\x05R\x13transportSlotsTaken\x12%\n\x0ebuild_progress\x18\x07\x20\x01(\ \x02R\rbuildProgress\x12/\n\x06add_on\x18\x08\x20\x01(\x0b2\x18.SC2APIPr\ otocol.UnitInfoR\x05addOn\";\n\x0bSinglePanel\x12,\n\x04unit\x18\x01\x20\ \x01(\x0b2\x18.SC2APIProtocol.UnitInfoR\x04unit\"<\n\nMultiPanel\x12.\n\ \x05units\x18\x01\x20\x03(\x0b2\x18.SC2APIProtocol.UnitInfoR\x05units\"\ \x9d\x01\n\nCargoPanel\x12,\n\x04unit\x18\x01\x20\x01(\x0b2\x18.SC2APIPr\ otocol.UnitInfoR\x04unit\x128\n\npassengers\x18\x02\x20\x03(\x0b2\x18.SC\ 2APIProtocol.UnitInfoR\npassengers\x12'\n\x0fslots_available\x18\x03\x20\ \x01(\x05R\x0eslotsAvailable\"z\n\x0fProductionPanel\x12,\n\x04unit\x18\ \x01\x20\x01(\x0b2\x18.SC2APIProtocol.UnitInfoR\x04unit\x129\n\x0bbuild_\ queue\x18\x02\x20\x03(\x0b2\x18.SC2APIProtocol.UnitInfoR\nbuildQueue\"\ \xdd\x05\n\x08ActionUI\x12I\n\rcontrol_group\x18\x01\x20\x01(\x0b2\".SC2\ APIProtocol.ActionControlGroupH\0R\x0ccontrolGroup\x12C\n\x0bselect_army\ \x18\x02\x20\x01(\x0b2\x20.SC2APIProtocol.ActionSelectArmyH\0R\nselectAr\ my\x12S\n\x11select_warp_gates\x18\x03\x20\x01(\x0b2%.SC2APIProtocol.Act\ ionSelectWarpGatesH\0R\x0fselectWarpGates\x12F\n\x0cselect_larva\x18\x04\ \x20\x01(\x0b2!.SC2APIProtocol.ActionSelectLarvaH\0R\x0bselectLarva\x12V\ \n\x12select_idle_worker\x18\x05\x20\x01(\x0b2&.SC2APIProtocol.ActionSel\ ectIdleWorkerH\0R\x10selectIdleWorker\x12C\n\x0bmulti_panel\x18\x06\x20\ \x01(\x0b2\x20.SC2APIProtocol.ActionMultiPanelH\0R\nmultiPanel\x12I\n\ \x0bcargo_panel\x18\x07\x20\x01(\x0b2&.SC2APIProtocol.ActionCargoPanelUn\ loadH\0R\ncargoPanel\x12a\n\x10production_panel\x18\x08\x20\x01(\x0b24.S\ C2APIProtocol.ActionProductionPanelRemoveFromQueueH\0R\x0fproductionPane\ l\x12O\n\x0ftoggle_autocast\x18\t\x20\x01(\x0b2$.SC2APIProtocol.ActionTo\ ggleAutocastH\0R\x0etoggleAutocastB\x08\n\x06action\"\xef\x01\n\x12Actio\ nControlGroup\x12M\n\x06action\x18\x01\x20\x01(\x0e25.SC2APIProtocol.Act\ ionControlGroup.ControlGroupActionR\x06action\x12.\n\x13control_group_in\ dex\x18\x02\x20\x01(\rR\x11controlGroupIndex\"Z\n\x12ControlGroupAction\ \x12\n\n\x06Recall\x10\x01\x12\x07\n\x03Set\x10\x02\x12\n\n\x06Append\ \x10\x03\x12\x0f\n\x0bSetAndSteal\x10\x04\x12\x12\n\x0eAppendAndSteal\ \x10\x05\"7\n\x10ActionSelectArmy\x12#\n\rselection_add\x18\x01\x20\x01(\ \x08R\x0cselectionAdd\"<\n\x15ActionSelectWarpGates\x12#\n\rselection_ad\ d\x18\x01\x20\x01(\x08R\x0cselectionAdd\"\x13\n\x11ActionSelectLarva\"\ \x88\x01\n\x16ActionSelectIdleWorker\x12?\n\x04type\x18\x01\x20\x01(\x0e\ 2+.SC2APIProtocol.ActionSelectIdleWorker.TypeR\x04type\"-\n\x04Type\x12\ \x07\n\x03Set\x10\x01\x12\x07\n\x03Add\x10\x02\x12\x07\n\x03All\x10\x03\ \x12\n\n\x06AddAll\x10\x04\"\xc4\x01\n\x10ActionMultiPanel\x129\n\x04typ\ e\x18\x01\x20\x01(\x0e2%.SC2APIProtocol.ActionMultiPanel.TypeR\x04type\ \x12\x1d\n\nunit_index\x18\x02\x20\x01(\x05R\tunitIndex\"V\n\x04Type\x12\ 
\x10\n\x0cSingleSelect\x10\x01\x12\x10\n\x0cDeselectUnit\x10\x02\x12\x13\ \n\x0fSelectAllOfType\x10\x03\x12\x15\n\x11DeselectAllOfType\x10\x04\"7\ \n\x16ActionCargoPanelUnload\x12\x1d\n\nunit_index\x18\x01\x20\x01(\x05R\ \tunitIndex\"E\n$ActionProductionPanelRemoveFromQueue\x12\x1d\n\nunit_in\ dex\x18\x01\x20\x01(\x05R\tunitIndex\"5\n\x14ActionToggleAutocast\x12\ \x1d\n\nability_id\x18\x01\x20\x01(\x05R\tabilityId\ "; static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy { lock: ::protobuf::lazy::ONCE_INIT, ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto, }; fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap() } pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { unsafe { file_descriptor_proto_lazy.get(|| { parse_descriptor_proto() }) } }
{ if let ::std::option::Option::Some(ObservationUI_oneof_panel::multi(_)) = self.panel { } else { self.panel = ::std::option::Option::Some(ObservationUI_oneof_panel::multi(MultiPanel::new())); } match self.panel { ::std::option::Option::Some(ObservationUI_oneof_panel::multi(ref mut v)) => v, _ => panic!(), } }
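// A minimal round-trip sketch for the generated messages above, assuming the
// pre-2.x `protobuf` runtime this codegen targets (`parse_from_bytes` and
// `Message::write_to_bytes` belong to that crate); the ability id below is an
// arbitrary illustrative value, not a real SC2 ability.

use protobuf::Message;

fn round_trip_sketch() -> protobuf::ProtobufResult<()> {
    let mut action = ActionToggleAutocast::new();
    action.set_ability_id(1234); // illustrative value only

    // Field 1 is an optional int32; it is serialized as a single varint field.
    let bytes = action.write_to_bytes()?;

    // Parse the bytes back and read the field through the generated accessors.
    let parsed: ActionToggleAutocast = protobuf::parse_from_bytes(&bytes)?;
    assert!(parsed.has_ability_id());
    assert_eq!(parsed.get_ability_id(), 1234);
    Ok(())
}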
init.rs
// Copyright 2020 - Nym Technologies SA <[email protected]> // SPDX-License-Identifier: Apache-2.0 use crate::commands::*; use crate::config::persistence::pathfinder::GatewayPathfinder; use crate::config::Config; use clap::{App, Arg, ArgMatches}; use config::NymConfig; use crypto::asymmetric::{encryption, identity}; pub fn command_args<'a, 'b>() -> clap::App<'a, 'b> { App::new("init") .about("Initialise the gateway") .arg( Arg::with_name(ID_ARG_NAME) .long(ID_ARG_NAME) .help("Id of the gateway we want to create config for.") .takes_value(true) .required(true), ) .arg( Arg::with_name(HOST_ARG_NAME) .long(HOST_ARG_NAME) .help("The custom host on which the gateway will be running for receiving sphinx packets") .takes_value(true) .required(true), ) .arg( Arg::with_name(MIX_PORT_ARG_NAME) .long(MIX_PORT_ARG_NAME) .help("The port on which the gateway will be listening for sphinx packets") .takes_value(true) ) .arg( Arg::with_name(CLIENTS_PORT_ARG_NAME) .long(CLIENTS_PORT_ARG_NAME) .help("The port on which the gateway will be listening for clients gateway-requests") .takes_value(true) ) .arg( Arg::with_name(ANNOUNCE_HOST_ARG_NAME) .long(ANNOUNCE_HOST_ARG_NAME) .help("The host that will be reported to the directory server") .takes_value(true), ) .arg( Arg::with_name(INBOXES_ARG_NAME) .long(INBOXES_ARG_NAME) .help("Directory with inboxes where all packets for the clients are stored") .takes_value(true) ) .arg( Arg::with_name(CLIENTS_LEDGER_ARG_NAME) .long(CLIENTS_LEDGER_ARG_NAME) .help("Ledger file containing registered clients") .takes_value(true) ) .arg( Arg::with_name(VALIDATORS_ARG_NAME) .long(VALIDATORS_ARG_NAME) .help("Comma separated list of rest endpoints of the validators") .takes_value(true), ) } fn show_bonding_info(config: &Config) { fn load_sphinx_keys(pathfinder: &GatewayPathfinder) -> encryption::KeyPair { let sphinx_keypair: encryption::KeyPair = pemstore::load_keypair(&pemstore::KeyPairPath::new( pathfinder.private_encryption_key().to_owned(), pathfinder.public_encryption_key().to_owned(), )) .expect("Failed to read stored sphinx key files"); println!( "Public sphinx key: {}\n", sphinx_keypair.public_key().to_base58_string() ); sphinx_keypair } fn load_identity_keys(pathfinder: &GatewayPathfinder) -> identity::KeyPair { let identity_keypair: identity::KeyPair = pemstore::load_keypair(&pemstore::KeyPairPath::new( pathfinder.private_identity_key().to_owned(), pathfinder.public_identity_key().to_owned(), )) .expect("Failed to read stored identity key files"); println!( "Public identity key: {}\n", identity_keypair.public_key().to_base58_string() ); identity_keypair } let pathfinder = GatewayPathfinder::new_from_config(config); let identity_keypair = load_identity_keys(&pathfinder); let sphinx_keypair = load_sphinx_keys(&pathfinder); println!( "\nTo bond your gateway you will [most likely] need to provide the following: Identity key: {} Sphinx key: {} Host: {} Mix Port: {} Clients Port: {} Location: [physical location of your node's server] Version: {} ", identity_keypair.public_key().to_base58_string(), sphinx_keypair.public_key().to_base58_string(), config.get_announce_address(), config.get_mix_port(), config.get_clients_port(), config.get_version(), ); } pub fn execute(matches: &ArgMatches) { let id = matches.value_of(ID_ARG_NAME).unwrap(); println!("Initialising gateway {}...", id); let already_init = if Config::default_config_file_path(Some(id)).exists()
else { false }; let mut config = Config::new(id); config = override_config(config, matches); // if gateway was already initialised, don't generate new keys if !already_init { let mut rng = rand::rngs::OsRng; let identity_keys = identity::KeyPair::new(&mut rng); let sphinx_keys = encryption::KeyPair::new(&mut rng); let pathfinder = GatewayPathfinder::new_from_config(&config); pemstore::store_keypair( &sphinx_keys, &pemstore::KeyPairPath::new( pathfinder.private_encryption_key().to_owned(), pathfinder.public_encryption_key().to_owned(), ), ) .expect("Failed to save sphinx keys"); pemstore::store_keypair( &identity_keys, &pemstore::KeyPairPath::new( pathfinder.private_identity_key().to_owned(), pathfinder.public_identity_key().to_owned(), ), ) .expect("Failed to save identity keys"); println!("Saved identity and mixnet sphinx keypairs"); } let config_save_location = config.get_config_file_save_location(); config .save_to_file(None) .expect("Failed to save the config file"); println!("Saved configuration file to {:?}", config_save_location); println!("Gateway configuration completed.\n\n\n"); show_bonding_info(&config); }
{ println!("Gateway \"{}\" was already initialised before! Config information will be overwritten (but keys will be kept)!", id); true }
jwt.strategy.ts
import { JwtDto } from './dto/jwt.dto'; import { Strategy, ExtractJwt } from 'passport-jwt'; import { PassportStrategy } from '@nestjs/passport'; import { Injectable, UnauthorizedException } from '@nestjs/common'; import { AuthService } from '../../services/auth.service'; import { User } from '@prisma/client'; import { ConfigService } from '@nestjs/config';
export class JwtStrategy extends PassportStrategy(Strategy) { constructor( private readonly authService: AuthService, readonly configService: ConfigService ) { super({ jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(), ignoreExpiration: false, secretOrKey: configService.get('JWT_PUBLIC_KEY'), algorithms: ['RS256'], }); } async validate(payload: JwtDto): Promise<User> { const user = await this.authService.validateUser(payload.userId); if (!user) { throw new UnauthorizedException(); } return user; } }
@Injectable()
urls.py
"""Contains all the url endpoints for interacting with Robinhood API.""" from robin_stocks.helper import id_for_chain, id_for_stock # Login def login_url(): return('https://api.robinhood.com/oauth2/token/') def challenge_url(challenge_id): return('https://api.robinhood.com/challenge/{0}/respond/'.format(challenge_id)) # Profiles def account_profile(): return('https://api.robinhood.com/accounts/') def basic_profile(): return('https://api.robinhood.com/user/basic_info/') def investment_profile(): return('https://api.robinhood.com/user/investment_profile/') def portfolio_profile(): return('https://api.robinhood.com/portfolios/') def security_profile(): return('https://api.robinhood.com/user/additional_info/') def user_profile(): return('https://api.robinhood.com/user/') # Stocks def earnings(): return('https://api.robinhood.com/marketdata/earnings/') def events(): return('https://api.robinhood.com/options/events/') def fundamentals(): return('https://api.robinhood.com/fundamentals/') def historicals(): return('https://api.robinhood.com/quotes/historicals/') def instruments(): return('https://api.robinhood.com/instruments/') def news(symbol): return('https://api.robinhood.com/midlands/news/{0}/?'.format(symbol)) def popularity(symbol): return('https://api.robinhood.com/instruments/{0}/popularity/'.format(id_for_stock(symbol))) def quotes(): return('https://api.robinhood.com/quotes/') def ratings(symbol): return('https://api.robinhood.com/midlands/ratings/{0}/'.format(id_for_stock(symbol))) def splits(symbol): return('https://api.robinhood.com/instruments/{0}/splits/'.format(id_for_stock(symbol))) # account def positions(): return('https://api.robinhood.com/positions/') def banktransfers(): return('https://api.robinhood.com/ach/transfers/') def daytrades(account): return('https://api.robinhood.com/accounts/{0}/recent_day_trades/'.format(account)) def dividends(): return('https://api.robinhood.com/dividends/') def documents(): return('https://api.robinhood.com/documents/') def linked(id=None, unlink=False): if unlink: return('https://api.robinhood.com/ach/relationships/{0}/unlink/'.format(id)) if id: return('https://api.robinhood.com/ach/relationships/{0}/'.format(id)) else: return('https://api.robinhood.com/ach/relationships/') def margin(): return('https://api.robinhood.com/margin/calls/') def margininterest(): return('https://api.robinhood.com/cash_journal/margin_interest_charges/') def notifications(tracker=False): if tracker: return('https://api.robinhood.com/midlands/notifications/notification_tracker/') else: return('https://api.robinhood.com/notifications/devices/') def referral(): return('https://api.robinhood.com/midlands/referral/') def stockloan(): return('https://api.robinhood.com/stock_loan/payments/') def subscription(): return('https://api.robinhood.com/subscription/subscription_fees/') def wiretransfers():
def watchlists(name=None, add=False): if add: return('https://api.robinhood.com/watchlists/{0}/bulk_add/'.format(name)) if name: return('https://api.robinhood.com/watchlists/{0}/'.format(name)) else: return('https://api.robinhood.com/watchlists/') # markets def currency(): return('https://nummus.robinhood.com/currency_pairs/') def markets(): return('https://api.robinhood.com/markets/') def movers(): return('https://api.robinhood.com/midlands/movers/sp500/') # options def aggregate(): return('https://api.robinhood.com/options/aggregate_positions/') def chains(symbol): return('https://api.robinhood.com/options/chains/{0}/'.format(id_for_chain(symbol))) def option_historicals(id): return('https://api.robinhood.com/marketdata/options/historicals/{0}/'.format(id)) def option_instruments(id=None): if id: return('https://api.robinhood.com/options/instruments/{0}/'.format(id)) else: return('https://api.robinhood.com/options/instruments/') def option_orders(orderID=None): if orderID: return('https://api.robinhood.com/options/orders/{0}/'.format(orderID)) else: return('https://api.robinhood.com/options/orders/') def option_positions(): return('https://api.robinhood.com/options/positions/') def marketdata_options(id): return('https://api.robinhood.com/marketdata/options/{0}/'.format(id)) # pricebook def marketdata_quotes(id): return ('https://api.robinhood.com/marketdata/quotes/{0}/'.format(id)) def marketdata_pricebook(id): return ('https://api.robinhood.com/marketdata/pricebook/snapshots/{0}/'.format(id)) # crypto def order_crypto(): return('https://nummus.robinhood.com/orders/') def crypto_account(): return('https://nummus.robinhood.com/accounts/') def crypto_currency_pairs(): return('https://nummus.robinhood.com/currency_pairs/') def crypto_quote(id): return('https://api.robinhood.com/marketdata/forex/quotes/{0}/'.format(id)) def crypto_holdings(): return('https://nummus.robinhood.com/holdings/') def crypto_historical(id): return('https://api.robinhood.com/marketdata/forex/historicals/{0}/'.format(id)) def crypto_orders(orderID=None): if orderID: return('https://nummus.robinhood.com/orders/{0}/'.format(orderID)) else: return('https://nummus.robinhood.com/orders/') def crypto_cancel(id): return('https://nummus.robinhood.com/orders/{0}/cancel/'.format(id)) # orders def cancel(url): return('https://api.robinhood.com/orders/{0}/cancel/'.format(url)) def option_cancel(id): return('https://api.robinhood.com/options/orders/{0}/cancel/'.format(id)) def orders(orderID=None): if orderID: return('https://api.robinhood.com/orders/{0}/'.format(orderID)) else: return('https://api.robinhood.com/orders/')
        return('https://api.robinhood.com/wire/transfers/')
sensor.py
"""Support for Sure PetCare Flaps/Pets sensors.""" from __future__ import annotations from typing import Any, cast from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( ATTR_VOLTAGE, DEVICE_CLASS_BATTERY, MASS_GRAMS, PERCENTAGE, VOLUME_MILLILITERS, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.update_coordinator import CoordinatorEntity from surepy.entities import SurepyEntity from surepy.entities.devices import ( Feeder as SureFeeder, FeederBowl as SureFeederBowl, Felaqua as SureFelaqua, Flap as SureFlap, SurepyDevice, ) from surepy.enums import EntityType, LockState # pylint: disable=relative-beyond-top-level from . import SurePetcareAPI from .const import ATTR_VOLTAGE_FULL, ATTR_VOLTAGE_LOW, DOMAIN, SPC, SURE_MANUFACTURER PARALLEL_UPDATES = 2 async def async_setup_platform( hass: HomeAssistant, config: ConfigEntry, async_add_entities: Any, discovery_info: Any = None, ) -> None: """Set up Sure PetCare sensor platform.""" await async_setup_entry(hass, config, async_add_entities) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Any ) -> None: """Set up config entry Sure PetCare Flaps sensors.""" entities: list[Flap | Felaqua | Feeder | FeederBowl | Battery] = [] spc: SurePetcareAPI = hass.data[DOMAIN][SPC] for surepy_entity in spc.coordinator.data.values(): if surepy_entity.type in [ EntityType.CAT_FLAP, EntityType.PET_FLAP, ]: entities.append(Flap(spc.coordinator, surepy_entity.id, spc)) elif surepy_entity.type == EntityType.FELAQUA: entities.append(Felaqua(spc.coordinator, surepy_entity.id, spc)) elif surepy_entity.type == EntityType.FEEDER: for bowl in surepy_entity.bowls.values(): entities.append( FeederBowl(spc.coordinator, surepy_entity.id, spc, bowl.raw_data()) ) entities.append(Feeder(spc.coordinator, surepy_entity.id, spc)) if surepy_entity.type in [ EntityType.CAT_FLAP, EntityType.PET_FLAP, EntityType.FEEDER, EntityType.FELAQUA, ]: voltage_batteries_full = cast( float, config_entry.options.get(ATTR_VOLTAGE_FULL) ) voltage_batteries_low = cast( float, config_entry.options.get(ATTR_VOLTAGE_LOW) ) entities.append( Battery( spc.coordinator, surepy_entity.id, spc, voltage_full=voltage_batteries_full, voltage_low=voltage_batteries_low, ) ) async_add_entities(entities) class SurePetcareSensor(CoordinatorEntity, SensorEntity): """A binary sensor implementation for Sure Petcare Entities.""" _attr_should_poll = False def __init__(self, coordinator, _id: int, spc: SurePetcareAPI): """Initialize a Sure Petcare sensor.""" super().__init__(coordinator) self._id = _id self._spc: SurePetcareAPI = spc self._coordinator = coordinator self._surepy_entity: SurepyEntity = self._coordinator.data[_id] self._state: dict[str, Any] = self._surepy_entity.raw_data()["status"] self._attr_available = bool(self._state) self._attr_unique_id = f"{self._surepy_entity.household_id}-{self._id}" self._attr_extra_state_attributes = ( {**self._surepy_entity.raw_data()} if self._state else {} ) self._attr_name: str = ( f"{self._surepy_entity.type.name.replace('_', ' ').title()} " f"{self._surepy_entity.name.capitalize()}" ) @property def device_info(self): device = {} try: model = f"{self._surepy_entity.type.name.replace('_', ' ').title()}" if serial := self._surepy_entity.raw_data().get("serial_number"): model = f"{model} ({serial})" elif mac_address := self._surepy_entity.raw_data().get("mac_address"): model = f"{model} ({mac_address})" elif tag_id := 
self._surepy_entity.raw_data().get("tag_id"): model = f"{model} ({tag_id})" device = { "identifiers": {(DOMAIN, self._id)}, "name": self._surepy_entity.name.capitalize(), "manufacturer": SURE_MANUFACTURER, "model": model, } if self._state: versions = self._state.get("version", {}) if dev_fw_version := versions.get("device", {}).get("firmware"): device["sw_version"] = dev_fw_version if (lcd_version := versions.get("lcd", {})) and ( rf_version := versions.get("rf", {}) ): device["sw_version"] = ( f"lcd: {lcd_version.get('version', lcd_version)['firmware']} | " f"fw: {rf_version.get('version', rf_version)['firmware']}" ) except AttributeError: pass return device class Flap(SurePetcareSensor): """Sure Petcare Flap.""" def __init__(self, coordinator, _id: int, spc: SurePetcareAPI) -> None: super().__init__(coordinator, _id, spc) self._surepy_entity: SureFlap self._attr_entity_picture = self._surepy_entity.icon self._attr_unit_of_measurement = None if self._state: self._attr_extra_state_attributes = { "learn_mode": bool(self._state["learn_mode"]), **self._surepy_entity.raw_data(), } if locking := self._state.get("locking"):
    @property
    def state(self) -> str | None:
        """Return the lock state of the flap."""
        if (
            state := cast(SureFlap, self._coordinator.data[self._id])
            .raw_data()
            .get("status")
        ):
            return LockState(state["locking"]["mode"]).name.casefold()


class Felaqua(SurePetcareSensor):
    """Sure Petcare Felaqua."""

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
        super().__init__(coordinator, _id, spc)

        self._surepy_entity: SureFelaqua
        self._attr_entity_picture = self._surepy_entity.icon
        self._attr_unit_of_measurement = VOLUME_MILLILITERS

    @property
    def state(self) -> float | None:
        """Return the remaining water."""
        if felaqua := cast(SureFelaqua, self._coordinator.data[self._id]):
            return int(felaqua.water_remaining) if felaqua.water_remaining else None


class FeederBowl(SurePetcareSensor):
    """Sure Petcare Feeder Bowl."""

    def __init__(
        self,
        coordinator,
        _id: int,
        spc: SurePetcareAPI,
        bowl_data: dict[str, int | str],
    ):
        """Initialize a Bowl sensor."""
        super().__init__(coordinator, _id, spc)

        self.feeder_id = _id
        self.bowl_id = int(bowl_data["index"])

        self._id = int(f"{_id}{str(self.bowl_id)}")
        self._spc: SurePetcareAPI = spc

        self._surepy_feeder_entity: SurepyEntity = self._coordinator.data[_id]
        self._surepy_entity: SureFeederBowl = self._coordinator.data[_id].bowls[
            self.bowl_id
        ]
        self._state: dict[str, Any] = bowl_data

        # https://github.com/PyCQA/pylint/issues/2062
        # pylint: disable=no-member
        self._attr_name = (
            f"{EntityType.FEEDER.name.replace('_', ' ').title()} "
            f"{self._surepy_entity.name.capitalize()}"
        )

        self._attr_icon = "mdi:bowl"
        self._attr_state = int(self._surepy_entity.weight)
        self._attr_unique_id = (
            f"{self._surepy_feeder_entity.household_id}-{self.feeder_id}-{self.bowl_id}"
        )
        self._attr_unit_of_measurement = MASS_GRAMS

    @property
    def state(self) -> float | None:
        """Return the remaining food in the bowl."""
        if (feeder := cast(SureFeeder, self._coordinator.data[self.feeder_id])) and (
            weight := feeder.bowls[self.bowl_id].weight
        ):
            return int(weight) if weight and weight > 0 else None


class Feeder(SurePetcareSensor):
    """Sure Petcare Feeder."""

    def __init__(self, coordinator, _id: int, spc: SurePetcareAPI):
        super().__init__(coordinator, _id, spc)

        self._surepy_entity: SureFeeder
        self._attr_entity_picture = self._surepy_entity.icon
        self._attr_unit_of_measurement = MASS_GRAMS

    @property
    def state(self) -> float | None:
        """Return the total remaining food."""
        if feeder := cast(SureFeeder, self._coordinator.data[self._id]):
            return int(feeder.total_weight) if feeder.total_weight else None


class Battery(SurePetcareSensor):
    """Sure Petcare device battery level."""

    def __init__(
        self,
        coordinator,
        _id: int,
        spc: SurePetcareAPI,
        voltage_full: float,
        voltage_low: float,
    ):
        super().__init__(coordinator, _id, spc)

        self._surepy_entity: SurepyDevice

        self._attr_name = f"{self._attr_name} Battery Level"
        self.voltage_low = voltage_low
        self.voltage_full = voltage_full
        self._attr_unit_of_measurement = PERCENTAGE
        self._attr_device_class = DEVICE_CLASS_BATTERY
        self._attr_unique_id = (
            f"{self._surepy_entity.household_id}-{self._surepy_entity.id}-battery"
        )

    @property
    def state(self) -> int | None:
        """Return battery level in percent."""
        if battery := cast(SurepyDevice, self._coordinator.data[self._id]):
            self._surepy_entity = battery
            battery_level = battery.calculate_battery_level(
                voltage_full=self.voltage_full, voltage_low=self.voltage_low
            )

            # return battery level between 0 and 100
            return battery_level

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the additional attrs."""

        attrs = {}

        if (device := cast(SurepyDevice,
self._coordinator.data[self._id])) and ( state := device.raw_data().get("status") ): self._surepy_entity = device voltage = float(state["battery"]) attrs = { "battery_level": device.battery_level, ATTR_VOLTAGE: f"{voltage:.2f}", f"{ATTR_VOLTAGE}_per_battery": f"{voltage / 4:.2f}", } return attrs
self._attr_state = LockState(locking["mode"]).name.casefold()
mod.rs
// rpc-perf - RPC Performance Testing // Copyright 2015 Twitter, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. extern crate slab; pub mod buffer; pub mod config; pub mod connection; pub mod net; use self::config::Config; use self::connection::*; use self::net::InternetProtocol; use cfgtypes::*; use common::stats::Stat; use mio; use mio::unix::UnixReady; use mio::{Evented, Events, Poll, PollOpt, Token}; use mpmc::Queue; use std::collections::VecDeque; use std::net::{SocketAddr, ToSocketAddrs}; use std::sync::Arc; use std::time::Duration; use tic::{Clocksource, Sample, Sender}; const MAX_CONNECTIONS: usize = 65_536; const MAX_EVENTS: usize = 1024; const MAX_PENDING: usize = 1024; const TICK_MS: u64 = 1; fn pollopt_conn() -> PollOpt { PollOpt::edge() | PollOpt::oneshot() } type Slab<T> = slab::Slab<T, Token>; pub struct Client { config: Config, connections: Slab<Connection>, factory: Factory, poll: Poll, queue: Queue<Vec<u8>>, ready: VecDeque<Token>, stats: Sender<Stat>, times: Vec<u64>, rtimes: Vec<u64>, clocksource: Clocksource, protocol: Box<ProtocolParse>, request_timeout: Option<u64>, connect_timeout: Option<u64>, events: Option<Events>, } impl Default for Client { fn default() -> Client { Client::configured(Config::default()) } } impl Client { /// returns the default `Config` for a `Client` pub fn configure() -> Config { Default::default() } /// turn a `Config` into a `Client` fn configured(config: Config) -> Client { if config.stats().is_none() { halt!("need stats"); } if config.clocksource().is_none() { halt!("need clocksource"); } if config.protocol().is_none() { halt!("need protocol"); } let c = config.clone(); let queue = Queue::with_capacity(MAX_PENDING); let clocksource = config.clocksource().unwrap(); let factory = Factory::new(config.rx_buffer_size(), config.tx_buffer_size()); let mut client = Client { clocksource: clocksource.clone(), config: c, connections: Slab::with_capacity(MAX_CONNECTIONS), events: Some(Events::with_capacity(MAX_EVENTS)), factory: factory, poll: Poll::new().unwrap(), queue: queue, ready: VecDeque::new(), stats: config.stats().unwrap(), times: vec![clocksource.counter(); MAX_CONNECTIONS], rtimes: vec![clocksource.counter(); MAX_CONNECTIONS], protocol: Arc::clone(&config.protocol().unwrap()).new(), request_timeout: config.request_timeout(), connect_timeout: config.connect_timeout(), }; for server in client.config.servers() { if let Ok(sock_addr) = client.resolve(server.clone()) { for _ in 0..client.config.pool_size() { let connection = client.factory.connect(sock_addr); match client.connections.insert(connection) { Ok(token) => { client.send_stat(token, Stat::SocketCreate); if client.has_stream(token) { client.register(client.connections[token].stream().unwrap(), token); client.set_timeout(token); } else { error!("failure creating connection"); } } Err(_) => { halt!("error acquiring token for connection"); } } } } else { panic!("Error resolving: {}", server); } } client } #[inline] fn has_stream(&self, token: Token) -> bool { 
self.connections[token].stream().is_some() } #[inline] fn is_connection(&self, token: Token) -> bool { token.0 <= MAX_CONNECTIONS } fn set_timeout(&mut self, token: Token) { if self.is_connection(token) { if self.connections[token].is_connecting() { if let Some(t) = self.connect_timeout { let deadline = self.clocksource.counter() + t * self.clocksource.frequency() as u64 / 1000; self.connections[token].set_timeout(Some(deadline)); } } else if let Some(t) = self.request_timeout { let deadline = self.clocksource.counter() + t * self.clocksource.frequency() as u64 / 1000; self.connections[token].set_timeout(Some(deadline)); } } } /// register with the poller /// - reregister on failure fn register<E: ?Sized>(&self, io: &E, token: Token) where E: Evented, { match self .poll .register(io, token, self.event_set(token), self.poll_opt(token)) { Ok(_) => {} Err(e) => { if !self.poll.deregister(io).is_ok() { debug!("error registering {:?}: {}", token, e); } else { let _ = self.poll .register(io, token, self.event_set(token), self.poll_opt(token)); } } } } // remove from the poller fn deregister<E: ?Sized>(&self, io: &E) where E: Evented, { match self.poll.deregister(io) { Ok(_) => {} Err(e) => { debug!("error deregistering: {}", e); } } } #[inline] fn event_set(&self, token: Token) -> mio::Ready
#[inline] fn poll_opt(&self, token: Token) -> mio::PollOpt { if token.0 <= MAX_CONNECTIONS { pollopt_conn() } else { halt!("poll_opt() unknown token: {:?}", token); } } #[inline] fn state(&self, token: Token) -> State { self.connections[token].state() } #[inline] fn set_state(&mut self, token: Token, state: State) { self.connections[token].set_state(state); } fn close(&mut self, token: Token) { if let Some(s) = self.connections[token].stream() { self.deregister(s); } self.clear_timer(token); let _ = self.connections[token].close(); self.send_stat(token, Stat::SocketClose); } /// resolve host:ip to SocketAddr fn resolve(&mut self, server: String) -> Result<SocketAddr, &'static str> { if let Ok(result) = server.to_socket_addrs() { for addr in result { match addr { SocketAddr::V4(_) => { if self.config.internet_protocol() == InternetProtocol::Any || self.config.internet_protocol() == InternetProtocol::IpV4 { return Ok(addr); } } SocketAddr::V6(_) => { if self.config.internet_protocol() == InternetProtocol::Any || self.config.internet_protocol() == InternetProtocol::IpV6 { return Ok(addr); } } } } } Err("failed to convert to socket address") } /// reconnect helper fn reconnect(&mut self, token: Token) { debug!("reconnect {:?}", token); self.close(token); self.times[token.0] = self.clocksource.counter(); self.connections[token].connect(); self.send_stat(token, Stat::SocketCreate); if self.connections[token].stream().is_some() { self.register(self.connections[token].stream().unwrap(), token); self.set_timeout(token); } else { debug!("failure reconnecting"); self.send_stat(token, Stat::ConnectError); self.set_timeout(token); // set a delay to reconnect } } /// write bytes to connection /// - reconnect on failure /// - transition to Reading if entire buffer written in one call fn write(&mut self, token: Token, work: Vec<u8>) { trace!("send to {:?}", token); self.send_stat(token, Stat::SocketWrite); self.times[token.0] = self.clocksource.counter(); if self.connections[token].write(work).is_ok() { self.set_timeout(token); if let Some(s) = self.connections[token].stream() { self.register(s, token); } if self.connections[token].is_readable() { self.send_stat(token, Stat::RequestSent); } } else { debug!("couldn't write"); self.send_stat(token, Stat::ConnectError); self.reconnect(token); } } /// idle connection /// - reconnect on failure /// - transition to Reading if entire buffer written in one call fn idle(&mut self, token: Token) { trace!("idle {:?}", token); if let Some(s) = self.connections[token].stream() { self.register(s, token); } } /// read and parse response /// - reconnect on failure /// - transition to Writing when response is complete fn read(&mut self, token: Token) { if let Ok(response) = self.connections[token].read() { if !response.is_empty() { let t0 = self.times[token.0]; let t1 = self.rtimes[token.0]; let parsed = self.protocol.parse(&response); let status = match parsed { ParsedResponse::Ok => Stat::ResponseOk, ParsedResponse::Hit => { let _ = self.stats.send(Sample::new(t0, t1, Stat::ResponseOk)); Stat::ResponseOkHit } ParsedResponse::Miss => { let _ = self.stats.send(Sample::new(t0, t1, Stat::ResponseOk)); Stat::ResponseOkMiss } _ => Stat::ResponseError, }; if parsed != ParsedResponse::Incomplete { let _ = self.stats.send(Sample::new(t0, t1, status.clone())); trace!("switch to established"); self.clear_timer(token); self.set_state(token, State::Established); self.idle(token); } } } else { debug!("read error. 
reconnect"); self.send_stat(token, Stat::ConnectError); self.reconnect(token); } } /// timeout handler /// - reconnect always fn timeout(&mut self, token: Token) { debug!("timeout {:?}", token); match self.state(token) { State::Connecting => { self.send_stat(token, Stat::ConnectTimeout); self.reconnect(token); } State::Closed => { self.reconnect(token); } State::Established => error!("timeout for State::Established"), State::Reading => { self.send_stat(token, Stat::ResponseTimeout); self.reconnect(token); } State::Writing => { error!("timeout for State::Writing"); } } } /// write remaining buffer to underlying stream for token /// - reconnect on failure /// - transition to Reading when write buffer depleated fn flush(&mut self, token: Token) { trace!("flush {:?}", token); self.times[token.0] = self.clocksource.counter(); if self.connections[token].flush().is_ok() { if let Some(s) = self.connections[token].stream() { self.register(s, token); } } else { self.send_stat(token, Stat::ConnectError); self.reconnect(token); } } /// try to send the next request using the given token /// - requeue in front if no work to send /// - halt: if the connection isn't actually writable fn try_send(&mut self, token: Token) { if self.connections[token].is_writable() { if let Some(work) = self.queue.pop() { trace!("send {:?}", token); self.write(token, work); } else { self.ready.push_front(token); } } else { halt!( "internal state error. dispatch to non-writable {:?}", self.state(token) ); } } fn send_stat(&mut self, token: Token, stat: Stat) { let t0 = self.times[token.0]; let t1 = self.clocksource.counter(); let _ = self.stats.send(Sample::new(t0, t1, stat)); } fn clear_timer(&mut self, token: Token) { self.connections[token].set_timeout(None); } /// event handler for connections fn connection_ready(&mut self, token: Token, event: mio::Event) { if self.connections[token].is_connecting() { if UnixReady::from(event.readiness()).is_hup() { debug!("hangup on connect {:?}", token); self.send_stat(token, Stat::ConnectError); self.reconnect(token); return; } else { trace!("connection established {:?}", token); self.send_stat(token, Stat::ConnectOk); self.clear_timer(token); self.set_state(token, State::Writing); self.ready.push_back(token); } } else { if UnixReady::from(event.readiness()).is_hup() { debug!("server hangup {:?}", token); self.reconnect(token); return; } match self.state(token) { State::Established => { trace!("ready to write {:?}", token); self.send_stat(token, Stat::SocketRead); self.set_state(token, State::Writing); self.ready.push_back(token); } State::Reading => { trace!("reading {:?}", token); self.send_stat(token, Stat::SocketRead); self.read(token); } State::Writing => { trace!("writing {:?}", token); self.send_stat(token, Stat::SocketFlush); self.flush(token); } _ => {} } } } /// poll for events and handle them pub fn poll(&mut self) { let time = self.clocksource.counter(); for i in 0..self.connections.len() { if let Some(timeout) = self.connections[Token(i)].get_timeout() { if time >= timeout { self.timeout(Token(i)); } } } let mut events = self .events .take() .unwrap_or_else(|| Events::with_capacity(MAX_EVENTS)); self.poll .poll(&mut events, Some(Duration::from_millis(TICK_MS))) .unwrap(); let mut rtokens = Vec::new(); for event in events.iter() { let token = event.token(); if token.0 <= MAX_CONNECTIONS { trace!("connection ready {:?}", token); self.rtimes[token.0] = self.clocksource.counter(); rtokens.push((token, event)); } else { halt!("unknown token: {:?}", token); } } for (token, 
event) in rtokens { self.connection_ready(token, event); } for _ in 0..self.ready.len() { let token = self.ready.pop_front().unwrap(); self.try_send(token); } self.events = Some(events); } /// spins on the poll() function to continuously poll for events pub fn run(&mut self) { loop { self.poll(); } } /// returns a synchronous sender for pushing requests to the connection pub fn tx(&self) -> Queue<Vec<u8>> { self.queue.clone() } }
{ self.connections[token].event_set() }
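A side note on the `mod.rs` record above: both branches of `set_timeout` convert a millisecond timeout into clocksource ticks before `poll()` compares it against the raw counter. A minimal sketch of that arithmetic, assuming a tick frequency in Hz; the names `deadline_ticks`, `now_ticks`, and `freq_hz` are illustrative and not part of the original:

```python
def deadline_ticks(now_ticks: int, timeout_ms: int, freq_hz: int) -> int:
    """Absolute tick deadline for a millisecond timeout.

    Mirrors `counter() + t * frequency() / 1000` from set_timeout():
    timeout_ms * freq_hz / 1000 expresses the timeout in ticks.
    """
    return now_ticks + timeout_ms * freq_hz // 1000

# e.g. a 200 ms request timeout on a 2.5 GHz clocksource:
assert deadline_ticks(0, 200, 2_500_000_000) == 500_000_000
```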
filter_bed.py
#!/usr/bin/env python3 """Filter bed-12 file. Remove: - incomplete annotations - genes without CDS """ import argparse import sys import re from collections import Counter try: from modules.common import die from modules.common import eprint except ImportError: from common import die from common import eprint __author__ = "Bogdan Kirilenko, 2020." __version__ = "1.0"
ALLOWED_CHARSET = "a-zA-Z0-9._-" ALLOWED_CHARSET_RE = rf"[^{ALLOWED_CHARSET}]" def parse_args(): """Read args, check.""" app = argparse.ArgumentParser() app.add_argument("input", help="Bed-12 formatted annotation track.") app.add_argument( "output", default="stdout", help="Output destination, stdout as default" ) app.add_argument( "--out_of_frame", action="store_true", dest="out_of_frame", help="Do not skip out-of-frame genes.", ) # print help if there are no args if len(sys.argv) < 2: app.print_help() sys.exit(0) args = app.parse_args() return args def prepare_bed_file(bed_file, output, ouf=False, save_rejected=None, only_chrom=None): """Filter the bed file given and save the updated version.""" new_lines = [] # keep updated lines rejected = [] # keep IDs of skipped transcripts + the reason why names = Counter() # we need to make sure that all names are unique allowed_re = re.compile(ALLOWED_CHARSET_RE).search broken_names = [] f = open(bed_file, "r") for num, line in enumerate(f, 1): # parse bed file according to specification line_data = line.rstrip().split("\t") if len(line_data) != 12: f.close() # this is for sure an error # it is possible only if something other than a bed12 was provided die( f"Error! Bed 12 file is required! Got a file with {len(line_data)} fields instead" ) chrom = line_data[0] if only_chrom and chrom != only_chrom: # TOGA allows performing the analysis on a specific chromosome only # if so, we can skip all transcripts located on other chromosomes continue chromStart = int(line_data[1]) chromEnd = int(line_data[2]) name = line_data[3] # gene_name usually corr_name = not bool(allowed_re(name)) if corr_name is False: broken_names.append(name) # TODO: check weird characters in the transcript name # bed_score = int(line_data[4]) # never used # strand = line_data[5] # otherwise: # strand = True if line_data[5] == '+' else False thickStart = int(line_data[6]) thickEnd = int(line_data[7]) # itemRgb = line_data[8] # never used blockCount = int(line_data[9]) blockSizes = [int(x) for x in line_data[10].split(",") if x != ""] blockStarts = [int(x) for x in line_data[11].split(",") if x != ""] blockEnds = [blockStarts[i] + blockSizes[i] for i in range(blockCount)] blockAbsStarts = [blockStarts[i] + chromStart for i in range(blockCount)] blockAbsEnds = [blockEnds[i] + chromStart for i in range(blockCount)] blockNewStarts, blockNewEnds = [], [] names[name] += 1 if thickStart > thickEnd: f.close() # according to bed12 specification this should never happen sys.stderr.write(f"Problem occurred at line {num}, gene {name}\n") die("Error! Bed file is corrupted, thickEnd MUST be >= thickStart") elif thickStart == thickEnd: # this means that this is a non-coding transcript # TOGA cannot process them: we can skip it rejected.append((name, "No CDS")) continue if thickStart < chromStart or thickEnd > chromEnd: # a very strange (but still possible) case f.close() # for sure an error with input data sys.stderr.write(f"Problem occurred at line {num}, gene {name}\n") die("Error! 
Bed file is corrupted, thickRange is outside chromRange!") # now select CDS only # we keep UTRs in the filtered file # however, we need CDS to check whether it's correct (% 3 == 0) for block_num in range(blockCount): blockStart = blockAbsStarts[block_num] blockEnd = blockAbsEnds[block_num] # skip the block if it is entirely UTR if blockEnd <= thickStart: continue elif blockStart >= thickEnd: continue # if we are here: this is not an entirely UTR exon # it might intersect the CDS border or lie in the CDS entirely # remove UTRs: block start must be >= CDS_start (thickStart) # block end must be <= CDS_end (thickEnd) blockNewStart = blockStart if blockStart >= thickStart else thickStart blockNewEnd = blockEnd if blockEnd <= thickEnd else thickEnd blockNewStarts.append(blockNewStart - thickStart) blockNewEnds.append(blockNewEnd - thickStart) if len(blockNewStarts) == 0: # even if thickStart != thickEnd this transcript can still be non-coding # but if there are no blocks in the CDS -> we can catch this rejected.append((name, "No CDS")) continue block_new_count = len(blockNewStarts) blockNewSizes = [ blockNewEnds[i] - blockNewStarts[i] for i in range(block_new_count) ] if sum(blockNewSizes) % 3 != 0 and not ouf: # this is an out-of-frame (or incomplete) transcript # ideally CDS length should be divisible by 3 # if ouf is set, we keep such transcripts anyway rejected.append((name, "Out-of-frame gene")) continue # we keep this transcript: add it to the list new_line = "\t".join([str(x) for x in line_data]) new_lines.append(new_line) f.close() # if transcript names contain disallowed characters: list them if len(broken_names) > 0: eprint("Error! Some transcript names contain disallowed characters") for t in broken_names: eprint(t) die(f"Allowed characters are: {ALLOWED_CHARSET}") # if there are non-unique transcript IDs: die # I kill it here, not earlier, to show them all at once if any(v > 1 for v in names.values()): eprint("Error! There are non-unique transcript IDs:") duplicates = [k for k, v in names.items() if v > 1] for d in duplicates: eprint(d) die("Abort") if len(new_lines) == 0: # no transcripts passed the filter: probably an input data mistake sys.exit( "Error! No reference annotation tracks left after the filtering procedure! Abort" ) # write transcripts that passed the filter to the output file f = open(output, "w") if output != "stdout" else sys.stdout f.write("\n".join(new_lines) + "\n") f.close() if output != "stdout" else None if save_rejected: # save transcripts that didn't pass the filter + reason why f = open(save_rejected, "w") for elem in rejected: f.write(f"{elem[0]}\t{elem[1]}\n") f.close() def main(): """Entry point.""" args = parse_args() prepare_bed_file(args.input, args.output, args.out_of_frame) sys.exit(0) if __name__ == "__main__": main()
__email__ = "[email protected]" __credits__ = ["Michael Hiller", "Virag Sharma", "David Jebb"]
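The heart of `prepare_bed_file` in the record above is clipping each exon block to the CDS interval [thickStart, thickEnd) and then checking that the clipped lengths sum to a multiple of 3. A standalone sketch of that step under the same coordinate conventions (absolute block starts/ends in, CDS-relative pairs out); the function and variable names here are illustrative:

```python
def clip_blocks_to_cds(abs_starts, abs_ends, thick_start, thick_end):
    """Return CDS-relative (start, end) pairs for blocks overlapping the CDS."""
    clipped = []
    for start, end in zip(abs_starts, abs_ends):
        if end <= thick_start or start >= thick_end:
            continue  # entirely UTR: skip the block
        new_start = max(start, thick_start)  # trim the 5' UTR part
        new_end = min(end, thick_end)        # trim the 3' UTR part
        clipped.append((new_start - thick_start, new_end - thick_start))
    return clipped

blocks = clip_blocks_to_cds([100, 300], [200, 400], 150, 350)
cds_len = sum(e - s for s, e in blocks)  # 50 + 50 = 100
assert cds_len % 3 != 0  # 100 is not divisible by 3: out-of-frame gene
```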
config.js
// var T = {}; /** * Data serialization classes using Backbone framework */ // T.bb = {}; // T.bb.models = {}; // T.bb.collections = {}; /** * Configuration parameters for application */ var config = {
grid_size: { x_min: 0, x_max: 8, y_min: 0, y_max: 8 }, lower_left: true }; module.exports = config;
canvas_size: { width: 800, height: 600 },
collection-misc.tap.js
/* * Copyright 2020 New Relic Corporation. All rights reserved. * SPDX-License-Identifier: Apache-2.0 */ 'use strict' const common = require('./collection-common') const mongoPackage = require('mongodb/package.json') const semver = require('semver') function verifyAggregateData(t, data) { t.equal(data.length, 3, 'should have expected amount of results') t.same(data, [{ value: 5 }, { value: 15 }, { value: 25 }], 'should have expected results') } if (semver.satisfies(mongoPackage.version, '<4')) { common.test('aggregate', function aggregateTest(t, collection, verify) { collection.aggregate( [ { $sort: { i: 1 } }, { $match: { mod10: 5 } }, { $limit: 3 }, { $project: { value: '$i', _id: 0 } } ], function onResult(err, cursor) { if (!cursor) { t.fail('No data retrieved!') verify(err) } else if (cursor instanceof Array) { verifyAggregateData(t, cursor) verify( err, ['Datastore/statement/MongoDB/testCollection/aggregate', 'Callback: onResult'], ['aggregate'] ) } else { cursor.toArray(function onResult2(err, data) { verifyAggregateData(t, data) verify( err, [ 'Datastore/statement/MongoDB/testCollection/aggregate', 'Callback: onResult', 'Datastore/statement/MongoDB/testCollection/toArray', 'Callback: onResult2' ], ['aggregate', 'toArray'] ) }) } } ) }) } else { common.test('aggregate v4', async function aggregateTest(t, collection, verify) { const data = await collection .aggregate([ { $sort: { i: 1 } }, { $match: { mod10: 5 } }, { $limit: 3 }, { $project: { value: '$i', _id: 0 } } ]) .toArray() verifyAggregateData(t, data) verify( null, [ 'Datastore/statement/MongoDB/testCollection/aggregate', 'Datastore/statement/MongoDB/testCollection/toArray' ], ['aggregate', 'toArray'], 2 ) }) } common.test('bulkWrite', function bulkWriteTest(t, collection, verify) { collection.bulkWrite( [{ deleteMany: { filter: {} } }, { insertOne: { document: { a: 1 } } }], { ordered: true, w: 1 }, onWrite ) function onWrite(err, data) { t.error(err) t.equal(data.insertedCount, 1) t.equal(data.deletedCount, 30) verify( null, ['Datastore/statement/MongoDB/testCollection/bulkWrite', 'Callback: onWrite'], ['bulkWrite'] ) } }) common.test('count', function countTest(t, collection, verify) { collection.count(function onCount(err, data) { t.error(err) t.equal(data, 30) verify( null, ['Datastore/statement/MongoDB/testCollection/count', 'Callback: onCount'], ['count'] ) }) }) common.test('distinct', function distinctTest(t, collection, verify) { collection.distinct('mod10', function done(err, data) { t.error(err) t.same(data.sort(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) verify( null, ['Datastore/statement/MongoDB/testCollection/distinct', 'Callback: done'], ['distinct'] ) }) }) common.test('drop', function dropTest(t, collection, verify) { collection.drop(function done(err, data) { t.error(err) t.equal(data, true) verify(null, ['Datastore/statement/MongoDB/testCollection/drop', 'Callback: done'], ['drop']) }) }) if (semver.satisfies(mongoPackage.version, '<3')) { common.test('geoNear', function geoNearTest(t, collection, verify) { collection.ensureIndex({ loc: '2d' }, { bucketSize: 1 }, indexed) function indexed(err) { t.error(err) collection.geoNear(20, 20, { maxDistance: 5 }, done) } function done(err, data) { t.error(err) t.equal(data.ok, 1) t.equal(data.results.length, 2) t.equal(data.results[0].obj.i, 21) t.equal(data.results[1].obj.i, 17) t.same(data.results[0].obj.loc, [21, 21]) t.same(data.results[1].obj.loc, [17, 17]) t.equal(data.results[0].dis, 1.4142135623730951) t.equal(data.results[1].dis, 4.242640687119285) verify( null, [ 
'Datastore/statement/MongoDB/testCollection/ensureIndex', 'Callback: indexed', 'Datastore/statement/MongoDB/testCollection/geoNear', 'Callback: done' ], ['ensureIndex', 'geoNear'] ) } }) } common.test('isCapped', function isCappedTest(t, collection, verify) { collection.isCapped(function done(err, data) { t.error(err) t.notOk(data) verify( null, ['Datastore/statement/MongoDB/testCollection/isCapped', 'Callback: done'], ['isCapped'] ) }) }) common.test('mapReduce', function mapReduceTest(t, collection, verify) { collection.mapReduce(map, reduce, { out: { inline: 1 } }, done) function done(err, data) { t.error(err) const expectedData = [ { _id: 0, value: 30 }, { _id: 1, value: 33 }, { _id: 2, value: 36 }, { _id: 3, value: 39 }, { _id: 4, value: 42 }, { _id: 5, value: 45 }, { _id: 6, value: 48 }, { _id: 7, value: 51 }, { _id: 8, value: 54 }, { _id: 9, value: 57 } ] // data is not sorted depending on speed of // db calls, sort to compare vs expected collection data.sort((a, b) => a._id - b._id) t.same(data, expectedData) verify( null, ['Datastore/statement/MongoDB/testCollection/mapReduce', 'Callback: done'], ['mapReduce'] ) } /* eslint-disable */ function map(obj) { emit(this.mod10, this.i) } /* eslint-enable */
function reduce(key, vals) { return vals.reduce(function sum(prev, val) { return prev + val }, 0) } }) common.test('options', function optionsTest(t, collection, verify) { collection.options(function done(err, data) { t.error(err) // Depending on the version of the mongo server this will change. if (data) { t.same(data, {}, 'should have expected results') } else { t.notOk(data, 'should have expected results') } verify( null, ['Datastore/statement/MongoDB/testCollection/options', 'Callback: done'], ['options'] ) }) }) if (semver.satisfies(mongoPackage.version, '<4')) { common.test('parallelCollectionScan', function (t, collection, verify) { collection.parallelCollectionScan({ numCursors: 1 }, function done(err, cursors) { t.error(err) cursors[0].toArray(function toArray(err, items) { t.error(err) t.equal(items.length, 30) const total = items.reduce(function sum(prev, item) { return item.i + prev }, 0) t.equal(total, 435) verify( null, [ 'Datastore/statement/MongoDB/testCollection/parallelCollectionScan', 'Callback: done', 'Datastore/statement/MongoDB/testCollection/toArray', 'Callback: toArray' ], ['parallelCollectionScan', 'toArray'] ) }) }) }) common.test('geoHaystackSearch', function haystackSearchTest(t, collection, verify) { collection.ensureIndex({ loc: 'geoHaystack', type: 1 }, { bucketSize: 1 }, indexed) function indexed(err) { t.error(err) collection.geoHaystackSearch(15, 15, { maxDistance: 5, search: {} }, done) } function done(err, data) { t.error(err) t.equal(data.ok, 1) t.equal(data.results.length, 2) t.equal(data.results[0].i, 13) t.equal(data.results[1].i, 17) t.same(data.results[0].loc, [13, 13]) t.same(data.results[1].loc, [17, 17]) verify( null, [ 'Datastore/statement/MongoDB/testCollection/ensureIndex', 'Callback: indexed', 'Datastore/statement/MongoDB/testCollection/geoHaystackSearch', 'Callback: done' ], ['ensureIndex', 'geoHaystackSearch'] ) } }) common.test('group', function groupTest(t, collection, verify) { collection.group(['mod10'], {}, { count: 0, total: 0 }, count, done) function done(err, data) { t.error(err) t.same(data.sort(sort), [ { mod10: 0, count: 3, total: 30 }, { mod10: 1, count: 3, total: 33 }, { mod10: 2, count: 3, total: 36 }, { mod10: 3, count: 3, total: 39 }, { mod10: 4, count: 3, total: 42 }, { mod10: 5, count: 3, total: 45 }, { mod10: 6, count: 3, total: 48 }, { mod10: 7, count: 3, total: 51 }, { mod10: 8, count: 3, total: 54 }, { mod10: 9, count: 3, total: 57 } ]) verify( null, ['Datastore/statement/MongoDB/testCollection/group', 'Callback: done'], ['group'] ) } function count(obj, prev) { prev.total += obj.i prev.count++ } function sort(a, b) { return a.mod10 - b.mod10 } }) } common.test('rename', function renameTest(t, collection, verify) { collection.rename('testCollection2', function done(err) { t.error(err) verify( null, ['Datastore/statement/MongoDB/testCollection/rename', 'Callback: done'], ['rename'] ) }) }) common.test('stats', function statsTest(t, collection, verify) { collection.stats({ i: 5 }, function done(err, data) { t.error(err) t.equal(data.ns, common.DB_NAME + '.testCollection') t.equal(data.count, 30) t.equal(data.ok, 1) verify(null, ['Datastore/statement/MongoDB/testCollection/stats', 'Callback: done'], ['stats']) }) })
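The `expectedData` table in the `mapReduce` test above is plain arithmetic over the fixture the suite seeds (30 documents with `i = 0..29` and `mod10 = i % 10`, as the other assertions imply). A quick sketch that reproduces it:

```python
from collections import defaultdict

totals = defaultdict(int)
for i in range(30):        # fixture: 30 documents, i = 0..29, grouped by i % 10
    totals[i % 10] += i

expected = [{"_id": k, "value": v} for k, v in sorted(totals.items())]
assert expected[0] == {"_id": 0, "value": 30}  # 0 + 10 + 20
assert expected[9] == {"_id": 9, "value": 57}  # 9 + 19 + 29
```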
table_digitalocean_balance.go
package digitalocean import ( "context" "github.com/turbot/steampipe-plugin-sdk/grpc/proto" "github.com/turbot/steampipe-plugin-sdk/plugin" ) func tableDigitalOceanBalance(ctx context.Context) *plugin.Table { return &plugin.Table{ Name: "digitalocean_balance", Description: "Balance information for the current account.", List: &plugin.ListConfig{ Hydrate: listBalance, }, Columns: []*plugin.Column{ {Name: "account_balance", Type: proto.ColumnType_DOUBLE, Description: "Current balance of the customer's most recent billing activity. Does not reflect month_to_date_usage."}, {Name: "generated_at", Type: proto.ColumnType_TIMESTAMP, Description: "The time at which balances were most recently generated."}, {Name: "month_to_date_balance", Type: proto.ColumnType_DOUBLE, Description: "Balance as of the generated_at time. This value includes the account_balance and month_to_date_usage."}, {Name: "month_to_date_usage", Type: proto.ColumnType_DOUBLE, Description: "Amount used in the current billing period as of the generated_at time."}, }, } } func listBalance(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) { conn, err := connect(ctx, d) if err != nil { plugin.Logger(ctx).Error("digitalocean_balance.listBalance", "connection_error", err) return nil, err } balance, resp, err := conn.Balance.Get(ctx) if err != nil { plugin.Logger(ctx).Error("digitalocean_balance.listBalance", "query_error", err, "resp", resp) return nil, err } d.StreamListItem(ctx, balance) return nil, nil
}
TestAptChefProvisionerPlugin.py
# -*- coding: utf-8 -*- # # # Copyright 2013 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # import logging import json from aminator.config import Config from aminator.plugins.provisioner.apt_chef import AptChefProvisionerPlugin log = logging.getLogger(__name__) console = logging.StreamHandler() # add the handler to the root logger logging.getLogger('').addHandler(console) class TestAptChefProvisionerPlugin(object): def setup_method(self, method): self.chef_provisioner = AptChefProvisionerPlugin() self.chef_provisioner._config = Config() self.chef_provisioner._config.context = Config() self.chef_provisioner._config.context.chef = Config() self.chef_provisioner._config.context.package = Config() self.chef_provisioner._config.pkg_attributes = ['name', 'version', 'release', 'build_job', 'build_number'] self.chef_provisioner._config.context.chef.dir = "./tests" self.chef_provisioner._config.context.chef.json = "test_chef_node.json" def test_parse_json(self): # given a JSON doc, what's the name, version, release string, etc # this is more a direct test of the ChefJSON mapping with open(self.chef_provisioner._get_chef_json_full_path()) as chef_json_file: my_json = json.load(chef_json_file) assert "helloworld" == my_json['name'] assert "APP-helloworld" == my_json['build_job'] assert "1.0" == my_json['version'] assert "277" == my_json['release'] assert "33a9d1cac7686c8a46c1f330add2e8d36850fd15" == my_json['change'] assert isinstance(my_json['run_list'], list) assert "recipe[helloworld]" == my_json['run_list'][0] def
(self): self.chef_provisioner._store_package_metadata() assert "helloworld" == self.chef_provisioner._config.context.package.attributes['name'] assert "1.0" == self.chef_provisioner._config.context.package.attributes['version'] assert "277" == self.chef_provisioner._config.context.package.attributes['release'] assert "APP-helloworld" == self.chef_provisioner._config.context.package.attributes['build_job'] assert "277" == self.chef_provisioner._config.context.package.attributes['build_number']
test_metadata
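The assertions in `test_parse_json` above pin down the shape of `tests/test_chef_node.json`. A hypothetical reconstruction of that fixture, with every value taken from the asserts (the file itself is not included in this record, so this is an inference, not the actual fixture):

```python
import json

# Hypothetical contents of tests/test_chef_node.json, inferred from the assertions.
test_chef_node = {
    "name": "helloworld",
    "build_job": "APP-helloworld",
    "version": "1.0",
    "release": "277",
    "change": "33a9d1cac7686c8a46c1f330add2e8d36850fd15",
    "run_list": ["recipe[helloworld]"],
}
print(json.dumps(test_chef_node, indent=2))
```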
expr.rs
use super::{Parser, PResult, Restrictions, PrevTokenKind, TokenType, PathStyle}; use super::{BlockMode, SemiColonMode}; use super::{SeqSep, TokenExpectType}; use super::pat::{GateOr, PARAM_EXPECTED}; use crate::maybe_recover_from_interpolated_ty_qpath; use crate::ptr::P; use crate::ast::{self, Attribute, AttrStyle, Ident, CaptureBy, BlockCheckMode}; use crate::ast::{Expr, ExprKind, RangeLimits, Label, Movability, IsAsync, Arm}; use crate::ast::{Ty, TyKind, FunctionRetTy, Arg, FnDecl}; use crate::ast::{BinOpKind, BinOp, UnOp}; use crate::ast::{Mac, AnonConst, Field}; use crate::parse::classify; use crate::parse::token::{self, Token}; use crate::parse::diagnostics::{Error}; use crate::print::pprust; use crate::source_map::{self, Span}; use crate::symbol::{kw, sym}; use crate::util::parser::{AssocOp, Fixity, prec_let_scrutinee_needs_par}; use std::mem; use errors::Applicability; use rustc_data_structures::thin_vec::ThinVec; /// Possibly accepts an `token::Interpolated` expression (a pre-parsed expression /// dropped into the token stream, which happens while parsing the result of /// macro expansion). Placement of these is not as complex as I feared it would /// be. The important thing is to make sure that lookahead doesn't balk at /// `token::Interpolated` tokens. macro_rules! maybe_whole_expr { ($p:expr) => { if let token::Interpolated(nt) = &$p.token.kind { match &**nt { token::NtExpr(e) | token::NtLiteral(e) => { let e = e.clone(); $p.bump(); return Ok(e); } token::NtPath(path) => { let path = path.clone(); $p.bump(); return Ok($p.mk_expr( $p.token.span, ExprKind::Path(None, path), ThinVec::new() )); } token::NtBlock(block) => { let block = block.clone(); $p.bump(); return Ok($p.mk_expr( $p.token.span, ExprKind::Block(block, None), ThinVec::new() )); } // N.B: `NtIdent(ident)` is normalized to `Ident` in `fn bump`. _ => {}, }; } } } #[derive(Debug)] pub(super) enum LhsExpr { NotYetParsed, AttributesParsed(ThinVec<Attribute>), AlreadyParsed(P<Expr>), } impl From<Option<ThinVec<Attribute>>> for LhsExpr { fn from(o: Option<ThinVec<Attribute>>) -> Self { if let Some(attrs) = o { LhsExpr::AttributesParsed(attrs) } else { LhsExpr::NotYetParsed } } } impl From<P<Expr>> for LhsExpr { fn from(expr: P<Expr>) -> Self { LhsExpr::AlreadyParsed(expr) } } impl<'a> Parser<'a> { /// Parses an expression. #[inline] pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> { self.parse_expr_res(Restrictions::empty(), None) } fn parse_paren_expr_seq(&mut self) -> PResult<'a, Vec<P<Expr>>> { self.parse_paren_comma_seq(|p| { match p.parse_expr() { Ok(expr) => Ok(expr), Err(mut err) => match p.token.kind { token::Ident(name, false) if name == kw::Underscore && p.look_ahead(1, |t| { t == &token::Comma }) => { // Special-case handling of `foo(_, _, _)` err.emit(); let sp = p.token.span; p.bump(); Ok(p.mk_expr(sp, ExprKind::Err, ThinVec::new())) } _ => Err(err), }, } }).map(|(r, _)| r) } /// Parses an expression, subject to the given restrictions. #[inline] pub(super) fn parse_expr_res( &mut self, r: Restrictions, already_parsed_attrs: Option<ThinVec<Attribute>> ) -> PResult<'a, P<Expr>> { self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs)) } /// Parses an associative expression. /// /// This parses an expression accounting for associativity and precedence of the operators in /// the expression. 
#[inline] fn parse_assoc_expr( &mut self, already_parsed_attrs: Option<ThinVec<Attribute>>, ) -> PResult<'a, P<Expr>> { self.parse_assoc_expr_with(0, already_parsed_attrs.into()) } /// Parses an associative expression with operators of at least `min_prec` precedence. pub(super) fn parse_assoc_expr_with( &mut self, min_prec: usize, lhs: LhsExpr, ) -> PResult<'a, P<Expr>> { let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs { expr } else { let attrs = match lhs { LhsExpr::AttributesParsed(attrs) => Some(attrs), _ => None, }; if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind) { return self.parse_prefix_range_expr(attrs); } else { self.parse_prefix_expr(attrs)? } }; let last_type_ascription_set = self.last_type_ascription.is_some(); match (self.expr_is_complete(&lhs), AssocOp::from_token(&self.token)) { (true, None) => { self.last_type_ascription = None; // Semi-statement forms are odd. See https://github.com/rust-lang/rust/issues/29071 return Ok(lhs); } (false, _) => {} // continue parsing the expression // An exhaustive check is done in the following block, but these are checked first // because they *are* ambiguous but also reasonable looking incorrect syntax, so we // want to keep their span info to improve diagnostics in these cases in a later stage. (true, Some(AssocOp::Multiply)) | // `{ 42 } *foo = bar;` or `{ 42 } * 3` (true, Some(AssocOp::Subtract)) | // `{ 42 } -5` (true, Some(AssocOp::LAnd)) | // `{ 42 } &&x` (#61475) (true, Some(AssocOp::Add)) // `{ 42 } + 42 // If the next token is a keyword, then the tokens above *are* unambiguously incorrect: // `if x { a } else { b } && if y { c } else { d }` if !self.look_ahead(1, |t| t.is_reserved_ident()) => { self.last_type_ascription = None; // These cases are ambiguous and can't be identified in the parser alone let sp = self.sess.source_map().start_point(self.token.span); self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span); return Ok(lhs); } (true, Some(ref op)) if !op.can_continue_expr_unambiguously() => { self.last_type_ascription = None; return Ok(lhs); } (true, Some(_)) => { // We've found an expression that would be parsed as a statement, but the next // token implies this should be parsed as an expression. // For example: `if let Some(x) = x { x } else { 0 } / 2` let mut err = self.struct_span_err(self.token.span, &format!( "expected expression, found `{}`", pprust::token_to_string(&self.token), )); err.span_label(self.token.span, "expected expression"); self.sess.expr_parentheses_needed( &mut err, lhs.span, Some(pprust::expr_to_string(&lhs), )); err.emit(); } } self.expected_tokens.push(TokenType::Operator); while let Some(op) = AssocOp::from_token(&self.token) { // Adjust the span for interpolated LHS to point to the `$lhs` token and not to what // it refers to. Interpolated identifiers are unwrapped early and never show up here // as `PrevTokenKind::Interpolated` so if LHS is a single identifier we always process // it as "interpolated", it doesn't change the answer for non-interpolated idents. 
let lhs_span = match (self.prev_token_kind, &lhs.node) { (PrevTokenKind::Interpolated, _) => self.prev_span, (PrevTokenKind::Ident, &ExprKind::Path(None, ref path)) if path.segments.len() == 1 => self.prev_span, _ => lhs.span, }; let cur_op_span = self.token.span; let restrictions = if op.is_assign_like() { self.restrictions & Restrictions::NO_STRUCT_LITERAL } else { self.restrictions }; let prec = op.precedence(); if prec < min_prec { break; } // Check for deprecated `...` syntax if self.token == token::DotDotDot && op == AssocOp::DotDotEq { self.err_dotdotdot_syntax(self.token.span); } if self.token == token::LArrow { self.err_larrow_operator(self.token.span); } self.bump(); if op.is_comparison() { self.check_no_chained_comparison(&lhs, &op); } // Special cases: if op == AssocOp::As { lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?; continue } else if op == AssocOp::Colon { let maybe_path = self.could_ascription_be_path(&lhs.node); self.last_type_ascription = Some((self.prev_span, maybe_path)); lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type)?; continue } else if op == AssocOp::DotDot || op == AssocOp::DotDotEq { // If we didn’t have to handle `x..`/`x..=`, it would be pretty easy to // generalise it to the Fixity::None code. // // We have 2 alternatives here: `x..y`/`x..=y` and `x..`/`x..=` The other // two variants are handled with `parse_prefix_range_expr` call above. let rhs = if self.is_at_start_of_range_notation_rhs() { Some(self.parse_assoc_expr_with(prec + 1, LhsExpr::NotYetParsed)?) } else { None }; let (lhs_span, rhs_span) = (lhs.span, if let Some(ref x) = rhs { x.span } else { cur_op_span }); let limits = if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let r = self.mk_range(Some(lhs), rhs, limits)?; lhs = self.mk_expr(lhs_span.to(rhs_span), r, ThinVec::new()); break } let fixity = op.fixity(); let prec_adjustment = match fixity { Fixity::Right => 0, Fixity::Left => 1, // We currently have no non-associative operators that are not handled above by // the special cases. The code is here only for future convenience. Fixity::None => 1, }; let rhs = self.with_res( restrictions - Restrictions::STMT_EXPR, |this| this.parse_assoc_expr_with(prec + prec_adjustment, LhsExpr::NotYetParsed) )?; // Make sure that the span of the parent node is larger than the span of lhs and rhs, // including the attributes. 
let lhs_span = lhs .attrs .iter() .filter(|a| a.style == AttrStyle::Outer) .next() .map_or(lhs_span, |a| a.span); let span = lhs_span.to(rhs.span); lhs = match op { AssocOp::Add | AssocOp::Subtract | AssocOp::Multiply | AssocOp::Divide | AssocOp::Modulus | AssocOp::LAnd | AssocOp::LOr | AssocOp::BitXor | AssocOp::BitAnd | AssocOp::BitOr | AssocOp::ShiftLeft | AssocOp::ShiftRight | AssocOp::Equal | AssocOp::Less | AssocOp::LessEqual | AssocOp::NotEqual | AssocOp::Greater | AssocOp::GreaterEqual => { let ast_op = op.to_ast_binop().unwrap(); let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs); self.mk_expr(span, binary, ThinVec::new()) } AssocOp::Assign => self.mk_expr(span, ExprKind::Assign(lhs, rhs), ThinVec::new()), AssocOp::AssignOp(k) => { let aop = match k { token::Plus => BinOpKind::Add, token::Minus => BinOpKind::Sub, token::Star => BinOpKind::Mul, token::Slash => BinOpKind::Div, token::Percent => BinOpKind::Rem, token::Caret => BinOpKind::BitXor, token::And => BinOpKind::BitAnd, token::Or => BinOpKind::BitOr, token::Shl => BinOpKind::Shl, token::Shr => BinOpKind::Shr, }; let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs); self.mk_expr(span, aopexpr, ThinVec::new()) } AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => { self.bug("AssocOp should have been handled by special case") } }; if let Fixity::None = fixity { break } } if last_type_ascription_set { self.last_type_ascription = None; } Ok(lhs) } /// Checks if this expression is a successfully parsed statement. fn expr_is_complete(&self, e: &Expr) -> bool { self.restrictions.contains(Restrictions::STMT_EXPR) && !classify::expr_requires_semi_to_be_stmt(e) } fn is_at_start_of_range_notation_rhs(&self) -> bool { if self.token.can_begin_expr() { // parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`. if self.token == token::OpenDelim(token::Brace) { return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL); } true } else { false } } /// Parse prefix-forms of range notation: `..expr`, `..`, `..=expr` fn parse_prefix_range_expr( &mut self, already_parsed_attrs: Option<ThinVec<Attribute>> ) -> PResult<'a, P<Expr>> { // Check for deprecated `...` syntax if self.token == token::DotDotDot { self.err_dotdotdot_syntax(self.token.span); } debug_assert!([token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind), "parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq", self.token); let tok = self.token.clone(); let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let lo = self.token.span; let mut hi = self.token.span; self.bump(); let opt_end = if self.is_at_start_of_range_notation_rhs() { // RHS must be parsed with more associativity than the dots. let next_prec = AssocOp::from_token(&tok).unwrap().precedence() + 1; Some(self.parse_assoc_expr_with(next_prec, LhsExpr::NotYetParsed) .map(|x| { hi = x.span; x })?) 
} else { None }; let limits = if tok == token::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let r = self.mk_range(None, opt_end, limits)?; Ok(self.mk_expr(lo.to(hi), r, attrs)) } /// Parse a prefix-unary-operator expr fn parse_prefix_expr( &mut self, already_parsed_attrs: Option<ThinVec<Attribute>> ) -> PResult<'a, P<Expr>> { let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let lo = self.token.span; // Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr() let (hi, ex) = match self.token.kind { token::Not => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Not, e)) } // Suggest `!` for bitwise negation when encountering a `~` token::Tilde => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; let span_of_tilde = lo; self.struct_span_err(span_of_tilde, "`~` cannot be used as a unary operator") .span_suggestion_short( span_of_tilde, "use `!` to perform bitwise negation", "!".to_owned(), Applicability::MachineApplicable ) .emit(); (lo.to(span), self.mk_unary(UnOp::Not, e)) } token::BinOp(token::Minus) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Neg, e)) } token::BinOp(token::Star) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Deref, e)) } token::BinOp(token::And) | token::AndAnd => { self.expect_and()?; let m = self.parse_mutability(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), ExprKind::AddrOf(m, e)) } token::Ident(..) if self.token.is_keyword(kw::Box) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), ExprKind::Box(e)) } token::Ident(..) if self.token.is_ident_named(sym::not) => { // `not` is just an ordinary identifier in Rust-the-language, // but as `rustc`-the-compiler, we can issue clever diagnostics // for confused users who really want to say `!` let token_cannot_continue_expr = |t: &Token| match t.kind { // These tokens can start an expression after `!`, but // can't continue an expression after an ident token::Ident(name, is_raw) => token::ident_can_begin_expr(name, t.span, is_raw), token::Literal(..) | token::Pound => true, _ => t.is_whole_expr(), }; let cannot_continue_expr = self.look_ahead(1, token_cannot_continue_expr); if cannot_continue_expr { self.bump(); // Emit the error ... self.struct_span_err( self.token.span, &format!("unexpected {} after identifier",self.this_token_descr()) ) .span_suggestion_short( // Span the `not` plus trailing whitespace to avoid // trailing whitespace after the `!` in our suggestion self.sess.source_map() .span_until_non_whitespace(lo.to(self.token.span)), "use `!` to perform logical negation", "!".to_owned(), Applicability::MachineApplicable ) .emit(); // —and recover! 
(just as if we were in the block // for the `token::Not` arm) let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Not, e)) } else { return self.parse_dot_or_call_expr(Some(attrs)); } } _ => { return self.parse_dot_or_call_expr(Some(attrs)); } }; return Ok(self.mk_expr(lo.to(hi), ex, attrs)); } /// Returns the span of expr, if it was not interpolated or the span of the interpolated token. fn interpolated_or_expr_span( &self, expr: PResult<'a, P<Expr>>, ) -> PResult<'a, (Span, P<Expr>)> { expr.map(|e| { if self.prev_token_kind == PrevTokenKind::Interpolated { (self.prev_span, e) } else { (e.span, e) } }) } fn parse_assoc_op_cast(&mut self, lhs: P<Expr>, lhs_span: Span, expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind) -> PResult<'a, P<Expr>> { let mk_expr = |this: &mut Self, rhs: P<Ty>| { this.mk_expr(lhs_span.to(rhs.span), expr_kind(lhs, rhs), ThinVec::new()) }; // Save the state of the parser before parsing type normally, in case there is a // LessThan comparison after this cast. let parser_snapshot_before_type = self.clone(); match self.parse_ty_no_plus() { Ok(rhs) => { Ok(mk_expr(self, rhs)) } Err(mut type_err) => { // Rewind to before attempting to parse the type with generics, to recover // from situations like `x as usize < y` in which we first tried to parse // `usize < y` as a type with generic arguments. let parser_snapshot_after_type = self.clone(); mem::replace(self, parser_snapshot_before_type); match self.parse_path(PathStyle::Expr) { Ok(path) => { let (op_noun, op_verb) = match self.token.kind { token::Lt => ("comparison", "comparing"), token::BinOp(token::Shl) => ("shift", "shifting"), _ => { // We can end up here even without `<` being the next token, for // example because `parse_ty_no_plus` returns `Err` on keywords, // but `parse_path` returns `Ok` on them due to error recovery. // Return original error and parser state. mem::replace(self, parser_snapshot_after_type); return Err(type_err); } }; // Successfully parsed the type path leaving a `<` yet to parse. type_err.cancel(); // Report non-fatal diagnostics, keep `x as usize` as an expression // in AST and continue parsing. let msg = format!("`<` is interpreted as a start of generic \ arguments for `{}`, not a {}", path, op_noun); let span_after_type = parser_snapshot_after_type.token.span; let expr = mk_expr(self, P(Ty { span: path.span, node: TyKind::Path(None, path), id: ast::DUMMY_NODE_ID })); let expr_str = self.span_to_snippet(expr.span) .unwrap_or_else(|_| pprust::expr_to_string(&expr)); self.struct_span_err(self.token.span, &msg) .span_label( self.look_ahead(1, |t| t.span).to(span_after_type), "interpreted as generic arguments" ) .span_label(self.token.span, format!("not interpreted as {}", op_noun)) .span_suggestion( expr.span, &format!("try {} the cast value", op_verb), format!("({})", expr_str), Applicability::MachineApplicable ) .emit(); Ok(expr) } Err(mut path_err) => { // Couldn't parse as a path, return original error and parser state. path_err.cancel(); mem::replace(self, parser_snapshot_after_type); Err(type_err) } } } } } /// Parses `a.b` or `a(13)` or `a[4]` or just `a`. 
fn parse_dot_or_call_expr( &mut self, already_parsed_attrs: Option<ThinVec<Attribute>>, ) -> PResult<'a, P<Expr>> { let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let b = self.parse_bottom_expr(); let (span, b) = self.interpolated_or_expr_span(b)?; self.parse_dot_or_call_expr_with(b, span, attrs) } pub(super) fn parse_dot_or_call_expr_with( &mut self, e0: P<Expr>, lo: Span, mut attrs: ThinVec<Attribute>, ) -> PResult<'a, P<Expr>> { // Stitch the list of outer attributes onto the return value. // A little bit ugly, but the best way given the current code // structure self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| expr.map(|mut expr| { attrs.extend::<Vec<_>>(expr.attrs.into()); expr.attrs = attrs; match expr.node { ExprKind::If(..) if !expr.attrs.is_empty() => { // Just point to the first attribute in there... let span = expr.attrs[0].span; self.span_err(span, "attributes are not yet allowed on `if` expressions"); } _ => {} } expr }) ) } fn parse_dot_or_call_expr_with_(&mut self, e0: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> { let mut e = e0; let mut hi; loop { // expr? while self.eat(&token::Question) { let hi = self.prev_span; e = self.mk_expr(lo.to(hi), ExprKind::Try(e), ThinVec::new()); } // expr.f if self.eat(&token::Dot) { match self.token.kind { token::Ident(..) => { e = self.parse_dot_suffix(e, lo)?; } token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) => { let span = self.token.span; self.bump(); let field = ExprKind::Field(e, Ident::new(symbol, span)); e = self.mk_expr(lo.to(span), field, ThinVec::new()); self.expect_no_suffix(span, "a tuple index", suffix); } token::Literal(token::Lit { kind: token::Float, symbol, .. }) => { self.bump(); let fstr = symbol.as_str(); let msg = format!("unexpected token: `{}`", symbol); let mut err = self.diagnostic().struct_span_err(self.prev_span, &msg); err.span_label(self.prev_span, "unexpected token"); if fstr.chars().all(|x| "0123456789.".contains(x)) { let float = match fstr.parse::<f64>().ok() { Some(f) => f, None => continue, }; let sugg = pprust::to_string(|s| { s.popen(); s.print_expr(&e); s.s.word( "."); s.print_usize(float.trunc() as usize); s.pclose(); s.s.word("."); s.s.word(fstr.splitn(2, ".").last().unwrap().to_string()) }); err.span_suggestion( lo.to(self.prev_span), "try parenthesizing the first index", sugg, Applicability::MachineApplicable ); } return Err(err); } _ => { // FIXME Could factor this out into non_fatal_unexpected or something. let actual = self.this_token_to_string(); self.span_err(self.token.span, &format!("unexpected token: `{}`", actual)); } } continue; } if self.expr_is_complete(&e) { break; } match self.token.kind { // expr(...) token::OpenDelim(token::Paren) => { let seq = self.parse_paren_expr_seq().map(|es| { let nd = self.mk_call(e, es); let hi = self.prev_span; self.mk_expr(lo.to(hi), nd, ThinVec::new()) }); e = self.recover_seq_parse_error(token::Paren, lo, seq); } // expr[...] // Could be either an index expression or a slicing expression. token::OpenDelim(token::Bracket) => { self.bump(); let ix = self.parse_expr()?; hi = self.token.span; self.expect(&token::CloseDelim(token::Bracket))?; let index = self.mk_index(e, ix); e = self.mk_expr(lo.to(hi), index, ThinVec::new()) } _ => return Ok(e) } } return Ok(e); } /// Assuming we have just parsed `.`, continue parsing into an expression. 
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> { if self.token.span.rust_2018() && self.eat_keyword(kw::Await) { return self.mk_await_expr(self_arg, lo); } let segment = self.parse_path_segment(PathStyle::Expr)?; self.check_trailing_angle_brackets(&segment, token::OpenDelim(token::Paren)); Ok(match self.token.kind { token::OpenDelim(token::Paren) => { // Method call `expr.f()` let mut args = self.parse_paren_expr_seq()?; args.insert(0, self_arg); let span = lo.to(self.prev_span); self.mk_expr(span, ExprKind::MethodCall(segment, args), ThinVec::new()) } _ => { // Field access `expr.f` if let Some(args) = segment.args { self.span_err(args.span(), "field expressions may not have generic arguments"); } let span = lo.to(self.prev_span); self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), ThinVec::new()) } }) } /// At the bottom (top?) of the precedence hierarchy, /// Parses things like parenthesized exprs, macros, `return`, etc. /// /// N.B., this does not parse outer attributes, and is private because it only works /// correctly if called from `parse_dot_or_call_expr()`. fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> { maybe_recover_from_interpolated_ty_qpath!(self, true); maybe_whole_expr!(self); // Outer attributes are already parsed and will be // added to the return value after the fact. // // Therefore, prevent sub-parser from parsing // attributes by giving them a empty "already parsed" list. let mut attrs = ThinVec::new(); let lo = self.token.span; let mut hi = self.token.span; let ex: ExprKind; macro_rules! parse_lit { () => { match self.parse_lit() { Ok(literal) => { hi = self.prev_span; ex = ExprKind::Lit(literal); } Err(mut err) => { self.cancel(&mut err); return Err(self.expected_expression_found()); } } } } // Note: when adding new syntax here, don't forget to adjust TokenKind::can_begin_expr(). match self.token.kind { // This match arm is a special-case of the `_` match arm below and // could be removed without changing functionality, but it's faster // to have it here, especially for programs with large constants. 
token::Literal(_) => { parse_lit!() } token::OpenDelim(token::Paren) => { self.bump(); attrs.extend(self.parse_inner_attributes()?); // (e) is parenthesized e // (e,) is a tuple with only one field, e let mut es = vec![]; let mut trailing_comma = false; let mut recovered = false; while self.token != token::CloseDelim(token::Paren) { es.push(match self.parse_expr() { Ok(es) => es, Err(mut err) => { // recover from parse error in tuple list match self.token.kind { token::Ident(name, false) if name == kw::Underscore && self.look_ahead(1, |t| { t == &token::Comma }) => { // Special-case handling of `Foo<(_, _, _)>` err.emit(); let sp = self.token.span; self.bump(); self.mk_expr(sp, ExprKind::Err, ThinVec::new()) } _ => return Ok( self.recover_seq_parse_error(token::Paren, lo, Err(err)), ), } } }); recovered = self.expect_one_of( &[], &[token::Comma, token::CloseDelim(token::Paren)], )?; if self.eat(&token::Comma) { trailing_comma = true; } else { trailing_comma = false; break; } } if !recovered { self.bump(); } hi = self.prev_span; ex = if es.len() == 1 && !trailing_comma { ExprKind::Paren(es.into_iter().nth(0).unwrap()) } else { ExprKind::Tup(es) }; } token::OpenDelim(token::Brace) => { return self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs); } token::BinOp(token::Or) | token::OrOr => { return self.parse_lambda_expr(attrs); } token::OpenDelim(token::Bracket) => { self.bump(); attrs.extend(self.parse_inner_attributes()?); if self.eat(&token::CloseDelim(token::Bracket)) { // Empty vector. ex = ExprKind::Array(Vec::new()); } else { // Nonempty vector. let first_expr = self.parse_expr()?; if self.eat(&token::Semi) { // Repeating array syntax: [ 0; 512 ] let count = AnonConst { id: ast::DUMMY_NODE_ID, value: self.parse_expr()?, }; self.expect(&token::CloseDelim(token::Bracket))?; ex = ExprKind::Repeat(first_expr, count); } else if self.eat(&token::Comma) { // Vector with two or more elements. let remaining_exprs = self.parse_seq_to_end( &token::CloseDelim(token::Bracket), SeqSep::trailing_allowed(token::Comma), |p| Ok(p.parse_expr()?) )?; let mut exprs = vec![first_expr]; exprs.extend(remaining_exprs); ex = ExprKind::Array(exprs); } else { // Vector with one element. 
self.expect(&token::CloseDelim(token::Bracket))?; ex = ExprKind::Array(vec![first_expr]); } } hi = self.prev_span; } _ => { if self.eat_lt() { let (qself, path) = self.parse_qpath(PathStyle::Expr)?; hi = path.span; return Ok(self.mk_expr(lo.to(hi), ExprKind::Path(Some(qself), path), attrs)); } if self.check_keyword(kw::Move) || self.check_keyword(kw::Static) { return self.parse_lambda_expr(attrs); } if self.eat_keyword(kw::If) { return self.parse_if_expr(attrs); } if self.eat_keyword(kw::For) { let lo = self.prev_span; return self.parse_for_expr(None, lo, attrs); } if self.eat_keyword(kw::While) { let lo = self.prev_span; return self.parse_while_expr(None, lo, attrs); } if let Some(label) = self.eat_label() { let lo = label.ident.span; self.expect(&token::Colon)?; if self.eat_keyword(kw::While) { return self.parse_while_expr(Some(label), lo, attrs) } if self.eat_keyword(kw::For) { return self.parse_for_expr(Some(label), lo, attrs) } if self.eat_keyword(kw::Loop) { return self.parse_loop_expr(Some(label), lo, attrs) } if self.token == token::OpenDelim(token::Brace) { return self.parse_block_expr(Some(label), lo, BlockCheckMode::Default, attrs); } let msg = "expected `while`, `for`, `loop` or `{` after a label"; let mut err = self.fatal(msg); err.span_label(self.token.span, msg); return Err(err); } if self.eat_keyword(kw::Loop) { let lo = self.prev_span; return self.parse_loop_expr(None, lo, attrs); } if self.eat_keyword(kw::Continue) { let label = self.eat_label(); let ex = ExprKind::Continue(label); let hi = self.prev_span; return Ok(self.mk_expr(lo.to(hi), ex, attrs)); } if self.eat_keyword(kw::Match) { let match_sp = self.prev_span; return self.parse_match_expr(attrs).map_err(|mut err| { err.span_label(match_sp, "while parsing this match expression"); err }); } if self.eat_keyword(kw::Unsafe) { return self.parse_block_expr( None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs); } if self.is_do_catch_block() { let mut db = self.fatal("found removed `do catch` syntax"); db.help("Following RFC #2388, the new non-placeholder syntax is `try`"); return Err(db); } if self.is_try_block() { let lo = self.token.span; assert!(self.eat_keyword(kw::Try)); return self.parse_try_block(lo, attrs); } // Span::rust_2018() is somewhat expensive; don't get it repeatedly. let is_span_rust_2018 = self.token.span.rust_2018(); if is_span_rust_2018 && self.check_keyword(kw::Async) { return if self.is_async_block() { // check for `async {` and `async move {` self.parse_async_block(attrs) } else { self.parse_lambda_expr(attrs) }; } if self.eat_keyword(kw::Return) { if self.token.can_begin_expr() { let e = self.parse_expr()?; hi = e.span; ex = ExprKind::Ret(Some(e)); } else { ex = ExprKind::Ret(None); } } else if self.eat_keyword(kw::Break) { let label = self.eat_label(); let e = if self.token.can_begin_expr() && !(self.token == token::OpenDelim(token::Brace) && self.restrictions.contains( Restrictions::NO_STRUCT_LITERAL)) { Some(self.parse_expr()?) 
} else { None }; ex = ExprKind::Break(label, e); hi = self.prev_span; } else if self.eat_keyword(kw::Yield) { if self.token.can_begin_expr() { let e = self.parse_expr()?; hi = e.span; ex = ExprKind::Yield(Some(e)); } else { ex = ExprKind::Yield(None); } let span = lo.to(hi); self.sess.gated_spans.yields.borrow_mut().push(span); } else if self.eat_keyword(kw::Let) { return self.parse_let_expr(attrs); } else if is_span_rust_2018 && self.eat_keyword(kw::Await) { let (await_hi, e_kind) = self.parse_incorrect_await_syntax(lo, self.prev_span)?; hi = await_hi; ex = e_kind; } else if self.token.is_path_start() { let path = self.parse_path(PathStyle::Expr)?; // `!`, as an operator, is prefix, so we know this isn't that if self.eat(&token::Not) { // MACRO INVOCATION expression let (delim, tts) = self.expect_delimited_token_tree()?; hi = self.prev_span; ex = ExprKind::Mac(Mac { path, tts, delim, span: lo.to(hi), prior_type_ascription: self.last_type_ascription, }); } else if self.check(&token::OpenDelim(token::Brace)) { if let Some(expr) = self.maybe_parse_struct_expr(lo, &path, &attrs) { return expr; } else { hi = path.span; ex = ExprKind::Path(None, path); } } else { hi = path.span; ex = ExprKind::Path(None, path); } } else { if !self.unclosed_delims.is_empty() && self.check(&token::Semi) { // Don't complain about bare semicolons after unclosed braces // recovery in order to keep the error count down. Fixing the // delimiters will possibly also fix the bare semicolon found in // expression context. For example, silence the following error: // ``` // error: expected expression, found `;` // --> file.rs:2:13 // | // 2 | foo(bar(; // | ^ expected expression // ``` self.bump(); return Ok(self.mk_expr(self.token.span, ExprKind::Err, ThinVec::new())); } parse_lit!() } } } let expr = self.mk_expr(lo.to(hi), ex, attrs); self.maybe_recover_from_bad_qpath(expr, true) } /// Matches `'-' lit | lit` (cf. `ast_validation::AstValidator::check_expr_within_pat`). crate fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> { maybe_whole_expr!(self); let minus_lo = self.token.span; let minus_present = self.eat(&token::BinOp(token::Minus)); let lo = self.token.span; let literal = self.parse_lit()?; let hi = self.prev_span; let expr = self.mk_expr(lo.to(hi), ExprKind::Lit(literal), ThinVec::new()); if minus_present { let minus_hi = self.prev_span; let unary = self.mk_unary(UnOp::Neg, expr); Ok(self.mk_expr(minus_lo.to(minus_hi), unary, ThinVec::new())) } else { Ok(expr) } } /// Parses a block or unsafe block. crate fn parse_block_expr( &mut self, opt_label: Option<Label>, lo: Span, blk_mode: BlockCheckMode, outer_attrs: ThinVec<Attribute>, ) -> PResult<'a, P<Expr>> { self.expect(&token::OpenDelim(token::Brace))?; let mut attrs = outer_attrs; attrs.extend(self.parse_inner_attributes()?); let blk = self.parse_block_tail(lo, blk_mode)?; return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs)); } /// Parses `move |args| expr`. fn parse_lambda_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let lo = self.token.span; let movability = if self.eat_keyword(kw::Static) { Movability::Static } else { Movability::Movable }; let asyncness = if self.token.span.rust_2018() { self.parse_asyncness() } else { IsAsync::NotAsync }; if asyncness.is_async() { // Feature gate `async ||` closures. 
            self.sess.gated_spans.async_closure.borrow_mut().push(self.prev_span);
        }

        let capture_clause = self.parse_capture_clause();
        let decl = self.parse_fn_block_decl()?;
        let decl_hi = self.prev_span;
        let body = match decl.output {
            FunctionRetTy::Default(_) => {
                let restrictions = self.restrictions - Restrictions::STMT_EXPR;
                self.parse_expr_res(restrictions, None)?
            },
            _ => {
                // If an explicit return type is given, require a
                // block to appear (RFC 968).
                let body_lo = self.token.span;
                self.parse_block_expr(None, body_lo, BlockCheckMode::Default, ThinVec::new())?
            }
        };

        Ok(self.mk_expr(
            lo.to(body.span),
            ExprKind::Closure(capture_clause, asyncness, movability, decl, body, lo.to(decl_hi)),
            attrs))
    }

    /// Parses an optional `move` prefix to a closure-like construct.
    fn parse_capture_clause(&mut self) -> CaptureBy {
        if self.eat_keyword(kw::Move) {
            CaptureBy::Value
        } else {
            CaptureBy::Ref
        }
    }

    /// Parses the `|arg, arg|` header of a closure.
    fn parse_fn_block_decl(&mut self) -> PResult<'a, P<FnDecl>> {
        let inputs_captures = {
            if self.eat(&token::OrOr) {
                Vec::new()
            } else {
                self.expect(&token::BinOp(token::Or))?;
                let args = self.parse_seq_to_before_tokens(
                    &[&token::BinOp(token::Or), &token::OrOr],
                    SeqSep::trailing_allowed(token::Comma),
                    TokenExpectType::NoExpect,
                    |p| p.parse_fn_block_arg()
                )?.0;
                self.expect_or()?;
                args
            }
        };
        let output = self.parse_ret_ty(true)?;

        Ok(P(FnDecl {
            inputs: inputs_captures,
            output,
            c_variadic: false
        }))
    }

    /// Parses an argument in a lambda header (e.g., `|arg, arg|`).
    fn parse_fn_block_arg(&mut self) -> PResult<'a, Arg> {
        let lo = self.token.span;
        let attrs = self.parse_arg_attributes()?;
        let pat = self.parse_pat(PARAM_EXPECTED)?;
        let t = if self.eat(&token::Colon) {
            self.parse_ty()?
        } else {
            P(Ty {
                id: ast::DUMMY_NODE_ID,
                node: TyKind::Infer,
                span: self.prev_span,
            })
        };
        let span = lo.to(self.token.span);
        Ok(Arg {
            attrs: attrs.into(),
            ty: t,
            pat,
            span,
            id: ast::DUMMY_NODE_ID
        })
    }

    /// Parses an `if` expression (`if` token already eaten).
    fn parse_if_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
        let lo = self.prev_span;
        let cond = self.parse_cond_expr()?;

        // Verify that the parsed `if` condition makes sense as a condition. If it is a block, then
        // verify that the last statement is either an implicit return (no `;`) or an explicit
        // return. This won't catch blocks with an explicit `return`, but that would be caught by
        // the dead code lint.
        if self.eat_keyword(kw::Else) || !cond.returns() {
            let sp = self.sess.source_map().next_point(lo);
            let mut err = self.diagnostic()
                .struct_span_err(sp, "missing condition for `if` expression");
            err.span_label(sp, "expected if condition here");
            return Err(err)
        }
        let not_block = self.token != token::OpenDelim(token::Brace);
        let thn = self.parse_block().map_err(|mut err| {
            if not_block {
                err.span_label(lo, "this `if` statement has a condition, but no block");
            }
            err
        })?;
        let mut els: Option<P<Expr>> = None;
        let mut hi = thn.span;
        if self.eat_keyword(kw::Else) {
} Ok(self.mk_expr(lo.to(hi), ExprKind::If(cond, thn, els), attrs)) } /// Parse the condition of a `if`- or `while`-expression fn parse_cond_expr(&mut self) -> PResult<'a, P<Expr>> { let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; if let ExprKind::Let(..) = cond.node { // Remove the last feature gating of a `let` expression since it's stable. let last = self.sess.gated_spans.let_chains.borrow_mut().pop(); debug_assert_eq!(cond.span, last.unwrap()); } Ok(cond) } /// Parses a `let $pat = $expr` pseudo-expression. /// The `let` token has already been eaten. fn parse_let_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let lo = self.prev_span; // FIXME(or_patterns, Centril | dlrobertson): use `parse_top_pat` instead. let pat = self.parse_top_pat_unpack(GateOr::No)?; self.expect(&token::Eq)?; let expr = self.with_res( Restrictions::NO_STRUCT_LITERAL, |this| this.parse_assoc_expr_with(1 + prec_let_scrutinee_needs_par(), None.into()) )?; let span = lo.to(expr.span); self.sess.gated_spans.let_chains.borrow_mut().push(span); Ok(self.mk_expr(span, ExprKind::Let(pat, expr), attrs)) } /// `else` token already eaten fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> { if self.eat_keyword(kw::If) { return self.parse_if_expr(ThinVec::new()); } else { let blk = self.parse_block()?; return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, None), ThinVec::new())); } } /// Parse a 'for' .. 'in' expression ('for' token already eaten) fn parse_for_expr( &mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute> ) -> PResult<'a, P<Expr>> { // Parse: `for <src_pat> in <src_expr> <src_loop_block>` // Record whether we are about to parse `for (`. // This is used below for recovery in case of `for ( $stuff ) $block` // in which case we will suggest `for $stuff $block`. let begin_paren = match self.token.kind { token::OpenDelim(token::Paren) => Some(self.token.span), _ => None, }; let pat = self.parse_top_pat(GateOr::Yes)?; if !self.eat_keyword(kw::In) { let in_span = self.prev_span.between(self.token.span); self.struct_span_err(in_span, "missing `in` in `for` loop") .span_suggestion_short( in_span, "try adding `in` here", " in ".into(), // has been misleading, at least in the past (closed Issue #48492) Applicability::MaybeIncorrect ) .emit(); } let in_span = self.prev_span; self.check_for_for_in_in_typo(in_span); let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; let pat = self.recover_parens_around_for_head(pat, &expr, begin_paren); let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let hi = self.prev_span; Ok(self.mk_expr(span_lo.to(hi), ExprKind::ForLoop(pat, expr, loop_block, opt_label), attrs)) } /// Parses a `while` or `while let` expression (`while` token already eaten). fn parse_while_expr( &mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute> ) -> PResult<'a, P<Expr>> { let cond = self.parse_cond_expr()?; let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); Ok(self.mk_expr(span, ExprKind::While(cond, body, opt_label), attrs)) } /// Parse `loop {...}`, `loop` token already eaten. 
fn parse_loop_expr( &mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute> ) -> PResult<'a, P<Expr>> { let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); Ok(self.mk_expr(span, ExprKind::Loop(body, opt_label), attrs)) } fn eat_label(&mut self) -> Option<Label> { if let Some(ident) = self.token.lifetime() { let span = self.token.span; self.bump(); Some(Label { ident: Ident::new(ident.name, span) }) } else { None } } // `match` token already eaten fn parse_match_expr(&mut self, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let match_span = self.prev_span; let lo = self.prev_span; let discriminant = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) { if self.token == token::Semi { e.span_suggestion_short( match_span, "try removing this `match`", String::new(), Applicability::MaybeIncorrect // speculative ); } return Err(e) } attrs.extend(self.parse_inner_attributes()?); let mut arms: Vec<Arm> = Vec::new(); while self.token != token::CloseDelim(token::Brace) { match self.parse_arm() { Ok(arm) => arms.push(arm), Err(mut e) => { // Recover by skipping to the end of the block. e.emit(); self.recover_stmt(); let span = lo.to(self.token.span); if self.token == token::CloseDelim(token::Brace) { self.bump(); } return Ok(self.mk_expr(span, ExprKind::Match(discriminant, arms), attrs)); } } } let hi = self.token.span; self.bump(); return Ok(self.mk_expr(lo.to(hi), ExprKind::Match(discriminant, arms), attrs)); } crate fn parse_arm(&mut self) -> PResult<'a, Arm> { let attrs = self.parse_outer_attributes()?; let lo = self.token.span; // FIXME(or_patterns, Centril | dlrobertson): use `parse_top_pat` instead. let pat = self.parse_top_pat_unpack(GateOr::No)?; let guard = if self.eat_keyword(kw::If) { Some(self.parse_expr()?) } else { None }; let arrow_span = self.token.span; self.expect(&token::FatArrow)?; let arm_start_span = self.token.span; let expr = self.parse_expr_res(Restrictions::STMT_EXPR, None) .map_err(|mut err| { err.span_label(arrow_span, "while parsing the `match` arm starting here"); err })?; let require_comma = classify::expr_requires_semi_to_be_stmt(&expr) && self.token != token::CloseDelim(token::Brace); let hi = self.token.span; if require_comma { let cm = self.sess.source_map(); self.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]) .map_err(|mut err| { match (cm.span_to_lines(expr.span), cm.span_to_lines(arm_start_span)) { (Ok(ref expr_lines), Ok(ref arm_start_lines)) if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col && expr_lines.lines.len() == 2 && self.token == token::FatArrow => { // We check whether there's any trailing code in the parse span, // if there isn't, we very likely have the following: // // X | &Y => "y" // | -- - missing comma // | | // | arrow_span // X | &X => "x" // | - ^^ self.token.span // | | // | parsed until here as `"y" & X` err.span_suggestion_short( cm.next_point(arm_start_span), "missing a comma here to end this `match` arm", ",".to_owned(), Applicability::MachineApplicable ); } _ => { err.span_label(arrow_span, "while parsing the `match` arm starting here"); } } err })?; } else { self.eat(&token::Comma); } Ok(ast::Arm { attrs, pats: pat, // FIXME(or_patterns, Centril | dlrobertson): this should just be `pat,`. guard, body: expr, span: lo.to(hi), id: ast::DUMMY_NODE_ID, }) } /// Parses a `try {...}` expression (`try` token already eaten). 
    fn parse_try_block(
        &mut self,
        span_lo: Span,
        mut attrs: ThinVec<Attribute>
    ) -> PResult<'a, P<Expr>> {
        let (iattrs, body) = self.parse_inner_attrs_and_block()?;
        attrs.extend(iattrs);
        if self.eat_keyword(kw::Catch) {
            let mut error = self.struct_span_err(self.prev_span,
                                                 "keyword `catch` cannot follow a `try` block");
            error.help("try using `match` on the result of the `try` block instead");
            error.emit();
            Err(error)
        } else {
            Ok(self.mk_expr(span_lo.to(body.span), ExprKind::TryBlock(body), attrs))
        }
    }

    fn is_do_catch_block(&self) -> bool {
        self.token.is_keyword(kw::Do) &&
        self.is_keyword_ahead(1, &[kw::Catch]) &&
        self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) &&
        !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
    }

    fn is_try_block(&self) -> bool {
        self.token.is_keyword(kw::Try) &&
        self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) &&
        self.token.span.rust_2018() &&
        // prevent `while try {} {}`, `if try {} {} else {}`, etc.
        !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
    }

    /// Parses an `async move? {...}` expression.
    pub fn parse_async_block(&mut self, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
        let span_lo = self.token.span;
        self.expect_keyword(kw::Async)?;
        let capture_clause = self.parse_capture_clause();
        let (iattrs, body) = self.parse_inner_attrs_and_block()?;
        attrs.extend(iattrs);
        Ok(self.mk_expr(
            span_lo.to(body.span),
            ExprKind::Async(capture_clause, ast::DUMMY_NODE_ID, body),
            attrs))
    }

    fn is_async_block(&self) -> bool {
        self.token.is_keyword(kw::Async) &&
        (
            ( // `async move {`
                self.is_keyword_ahead(1, &[kw::Move]) &&
                self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))
            ) || ( // `async {`
                self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace))
            )
        )
    }

    fn maybe_parse_struct_expr(
        &mut self,
        lo: Span,
        path: &ast::Path,
        attrs: &ThinVec<Attribute>,
    ) -> Option<PResult<'a, P<Expr>>> {
        let struct_allowed = !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
        let certainly_not_a_block = || self.look_ahead(1, |t| t.is_ident()) && (
            // `{ ident, ` cannot start a block
            self.look_ahead(2, |t| t == &token::Comma) ||
            self.look_ahead(2, |t| t == &token::Colon) && (
                // `{ ident: token, ` cannot start a block
                self.look_ahead(4, |t| t == &token::Comma) ||
                // `{ ident: ` cannot start a block unless it's a type ascription `ident: Type`
                self.look_ahead(3, |t| !t.can_begin_type())
            )
        );

        if struct_allowed || certainly_not_a_block() {
            // This is a struct literal; parse it even when it is not allowed
            // here so that a better error can be emitted below.
            let expr = self.parse_struct_expr(lo, path.clone(), attrs.clone());
            if let (Ok(expr), false) = (&expr, struct_allowed) {
                self.struct_span_err(
                    expr.span,
                    "struct literals are not allowed here",
                )
                .multipart_suggestion(
                    "surround the struct literal with parentheses",
                    vec![
                        (lo.shrink_to_lo(), "(".to_string()),
                        (expr.span.shrink_to_hi(), ")".to_string()),
                    ],
                    Applicability::MachineApplicable,
                )
                .emit();
            }
            return Some(expr);
        }
        None
    }

    pub(super) fn parse_struct_expr(
        &mut self,
        lo: Span,
        pth: ast::Path,
        mut attrs: ThinVec<Attribute>
    ) -> PResult<'a, P<Expr>> {
        let struct_sp = lo.to(self.prev_span);
        self.bump();
        let mut fields = Vec::new();
        let mut base = None;

        attrs.extend(self.parse_inner_attributes()?);

        while self.token != token::CloseDelim(token::Brace) {
            if self.eat(&token::DotDot) {
                let exp_span = self.prev_span;
                match self.parse_expr() {
                    Ok(e) => {
                        base = Some(e);
                    }
                    Err(mut e) => {
                        e.emit();
                        self.recover_stmt();
                    }
                }
                if self.token == token::Comma {
                    self.struct_span_err(
                        exp_span.to(self.prev_span),
                        "cannot use a comma after the base struct",
                    )
                    .span_suggestion_short(
                        self.token.span,
                        "remove this comma",
                        String::new(),
                        Applicability::MachineApplicable
                    )
                    .note("the base struct must always be the last field")
                    .emit();
                    self.recover_stmt();
                }
                break;
            }

            let mut recovery_field = None;
            if let token::Ident(name, _) = self.token.kind {
                if !self.token.is_reserved_ident() && self.look_ahead(1, |t| *t == token::Colon) {
                    // Use in case of error after field-looking code: `S { foo: () with a }`
                    recovery_field = Some(ast::Field {
                        ident: Ident::new(name, self.token.span),
                        span: self.token.span,
                        expr: self.mk_expr(self.token.span, ExprKind::Err, ThinVec::new()),
                        is_shorthand: false,
                        attrs: ThinVec::new(),
                        id: ast::DUMMY_NODE_ID,
                    });
                }
            }
            let mut parsed_field = None;
            match self.parse_field() {
                Ok(f) => parsed_field = Some(f),
                Err(mut e) => {
                    e.span_label(struct_sp, "while parsing this struct");
                    e.emit();

                    // If the next token is a comma, then try to parse
                    // what comes next as additional fields, rather than
                    // bailing out until next `}`.
                    if self.token != token::Comma {
                        self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
                        if self.token != token::Comma {
                            break;
                        }
                    }
                }
            }

            match self.expect_one_of(&[token::Comma],
                                     &[token::CloseDelim(token::Brace)]) {
                Ok(_) => if let Some(f) = parsed_field.or(recovery_field) {
                    // only include the field if there's no parse error for the field name
                    fields.push(f);
                }
                Err(mut e) => {
                    if let Some(f) = recovery_field {
                        fields.push(f);
                    }
                    e.span_label(struct_sp, "while parsing this struct");
                    e.emit();
                    self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
                    self.eat(&token::Comma);
                }
            }
        }

        let span = lo.to(self.token.span);
        self.expect(&token::CloseDelim(token::Brace))?;
        return Ok(self.mk_expr(span, ExprKind::Struct(pth, fields, base), attrs));
    }

    /// Parses `ident (COLON expr)?`.
    fn parse_field(&mut self) -> PResult<'a, Field> {
        let attrs = self.parse_outer_attributes()?;
        let lo = self.token.span;

        // Check if a colon exists one ahead. This means we're parsing a fieldname.
        let (fieldname, expr, is_shorthand) = if self.look_ahead(1, |t| {
            t == &token::Colon || t == &token::Eq
        }) {
            let fieldname = self.parse_field_name()?;

            // Check for an equals token. This means the source incorrectly attempts to
            // initialize a field with an eq rather than a colon.
            if self.token == token::Eq {
                self.diagnostic()
                    .struct_span_err(self.token.span, "expected `:`, found `=`")
                    .span_suggestion(
                        fieldname.span.shrink_to_hi().to(self.token.span),
                        "replace equals symbol with a colon",
                        ":".to_string(),
                        Applicability::MachineApplicable,
                    )
                    .emit();
            }
            self.bump(); // `:`
            (fieldname, self.parse_expr()?, false)
        } else {
            let fieldname = self.parse_ident_common(false)?;

            // Mimic `x: x` for the `x` field shorthand.
let path = ast::Path::from_ident(fieldname); let expr = self.mk_expr(fieldname.span, ExprKind::Path(None, path), ThinVec::new()); (fieldname, expr, true) }; Ok(ast::Field { ident: fieldname, span: lo.to(expr.span), expr, is_shorthand, attrs: attrs.into(), id: ast::DUMMY_NODE_ID, }) } fn err_dotdotdot_syntax(&self, span: Span) { self.struct_span_err(span, "unexpected token: `...`") .span_suggestion( span, "use `..` for an exclusive range", "..".to_owned(), Applicability::MaybeIncorrect ) .span_suggestion( span, "or `..=` for an inclusive range", "..=".to_owned(), Applicability::MaybeIncorrect ) .emit(); } fn err_larrow_operator(&self, span: Span) { self.struct_span_err( span, "unexpected token: `<-`" ).span_suggestion( span, "if you meant to write a comparison against a negative value, add a \ space in between `<` and `-`", "< -".to_string(), Applicability::MaybeIncorrect ).emit(); } fn mk_assign_op(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind { ExprKind::AssignOp(binop, lhs, rhs) } fn mk_range( &self, start: Option<P<Expr>>, end: Option<P<Expr>>, limits: RangeLimits ) -> PResult<'a, ExprKind> { if end.is_none() && limits == RangeLimits::Closed { Err(self.span_fatal_err(self.token.span, Error::InclusiveRangeWithNoEnd)) } else { Ok(ExprKind::Range(start, end, limits)) } } fn mk_unary(&self, unop: UnOp, expr: P<Expr>) -> ExprKind { ExprKind::Unary(unop, expr) } fn mk_binary(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind { ExprKind::Binary(binop, lhs, rhs) } fn mk_index(&self, expr: P<Expr>, idx: P<Expr>) -> ExprKind { ExprKind::Index(expr, idx) } fn mk_call(&self, f: P<Expr>, args: Vec<P<Expr>>) -> ExprKind { ExprKind::Call(f, args) } fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> { let span = lo.to(self.prev_span); let await_expr = self.mk_expr(span, ExprKind::Await(self_arg), ThinVec::new()); self.recover_from_await_method_call(); Ok(await_expr) } crate fn mk_expr(&self, span: Span, node: ExprKind, attrs: ThinVec<Attribute>) -> P<Expr> { P(Expr { node, span, attrs, id: ast::DUMMY_NODE_ID }) } }
let elexpr = self.parse_else_expr()?; hi = elexpr.span; els = Some(elexpr);
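A pattern worth calling out in the parser chunks above is the Restrictions::NO_STRUCT_LITERAL bitflag: while parsing an `if`/`while`/`for` head, struct literals are disallowed so that the first `{` after the condition is always read as the body block (see parse_cond_expr and maybe_parse_struct_expr). Below is a minimal sketch of that disambiguation in Python; it is illustrative only, with invented names, and is not rustc's implementation.

import re

TOKEN_RE = re.compile(r"\s*(\{|\}|[A-Za-z_]\w*)")

def tokenize(src):
    tokens, pos = [], 0
    while pos < len(src):
        m = TOKEN_RE.match(src, pos)
        if not m:
            raise SyntaxError("unexpected input at offset %d" % pos)
        tokens.append(m.group(1))
        pos = m.end()
    return tokens

class Parser:
    def __init__(self, tokens):
        self.tokens = tokens
        self.i = 0

    def peek(self):
        return self.tokens[self.i] if self.i < len(self.tokens) else None

    def bump(self):
        tok = self.peek()
        self.i += 1
        return tok

    def parse_block(self):
        assert self.bump() == "{" and self.bump() == "}"
        return ("block",)

    def parse_expr(self, no_struct_literal=False):
        name = self.bump()
        # Without the restriction, `name {` is read as a struct literal.
        if self.peek() == "{" and not no_struct_literal:
            self.parse_block()  # struct literal body (fields elided)
            return ("struct_literal", name)
        return ("path", name)

    def parse_if(self):
        assert self.bump() == "if"
        # The condition is parsed with struct literals disallowed, so the
        # next `{` always opens the if-body (rustc: NO_STRUCT_LITERAL).
        cond = self.parse_expr(no_struct_literal=True)
        return ("if", cond, self.parse_block())

print(Parser(tokenize("x { }")).parse_expr())   # ('struct_literal', 'x')
print(Parser(tokenize("if x { }")).parse_if())  # ('if', ('path', 'x'), ('block',))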
sqlite3-search-bench.py
import os import os.path from time import perf_counter as clock import numpy import random # in order to always generate the same random sequence random.seed(19) def fill_arrays(start, stop):
# Generator for ensure pytables benchmark compatibility def int_generator(nrows): step = 1000 * 100 j = 0 for i in range(nrows): if i >= step * j: stop = (j + 1) * step if stop > nrows: # Seems unnecessary stop = nrows col_i, col_j = fill_arrays(i, stop) j += 1 k = 0 yield (col_i[k], col_j[k]) k += 1 def int_generator_slow(nrows): for i in range(nrows): if userandom: yield (i, float(random.randint(0, nrows))) else: yield (i, float(i)) def open_db(filename, remove=0): if remove and os.path.exists(filename): os.remove(filename) con = sqlite.connect(filename) cur = con.cursor() return con, cur def create_db(filename, nrows): con, cur = open_db(filename, remove=1) cur.execute("create table ints(i integer, j real)") t1 = clock() # This is twice as fast as a plain loop cur.executemany("insert into ints(i,j) values (?,?)", int_generator(nrows)) con.commit() ctime = clock() - t1 if verbose: print(f"insert time: {ctime:.5f}") print(f"Krows/s: {nrows / 1000 / ctime:.5f}") close_db(con, cur) def index_db(filename): con, cur = open_db(filename) t1 = clock() cur.execute("create index ij on ints(j)") con.commit() itime = clock() - t1 if verbose: print(f"index time: {itime:.5f}") print(f"Krows/s: {nrows / itime:.5f}") # Close the DB close_db(con, cur) def query_db(filename, rng): con, cur = open_db(filename) t1 = clock() ntimes = 10 for i in range(ntimes): # between clause does not seem to take advantage of indexes # cur.execute("select j from ints where j between %s and %s" % \ cur.execute("select i from ints where j >= %s and j <= %s" % # cur.execute("select i from ints where i >= %s and i <= # %s" % (rng[0] + i, rng[1] + i)) results = cur.fetchall() con.commit() qtime = (clock() - t1) / ntimes if verbose: print(f"query time: {qtime:.5f}") print(f"Mrows/s: {nrows / 1000 / qtime:.5f}") print(results) close_db(con, cur) def close_db(con, cur): cur.close() con.close() if __name__ == "__main__": import sys import getopt try: import psyco psyco_imported = 1 except: psyco_imported = 0 usage = """usage: %s [-v] [-p] [-m] [-i] [-q] [-c] [-R range] [-n nrows] file -v verbose -p use "psyco" if available -m use random values to fill the table -q do query -c create the database -i index the table -2 use sqlite2 (default is use sqlite3) -R select a range in a field in the form "start,stop" (def "0,10") -n sets the number of rows (in krows) in each table \n""" % sys.argv[0] try: opts, pargs = getopt.getopt(sys.argv[1:], 'vpmiqc2R:n:') except: sys.stderr.write(usage) sys.exit(0) # default options verbose = 0 usepsyco = 0 userandom = 0 docreate = 0 createindex = 0 doquery = 0 sqlite_version = "3" rng = [0, 10] nrows = 1 # Get the options for option in opts: if option[0] == '-v': verbose = 1 elif option[0] == '-p': usepsyco = 1 elif option[0] == '-m': userandom = 1 elif option[0] == '-i': createindex = 1 elif option[0] == '-q': doquery = 1 elif option[0] == '-c': docreate = 1 elif option[0] == "-2": sqlite_version = "2" elif option[0] == '-R': rng = [int(i) for i in option[1].split(",")] elif option[0] == '-n': nrows = int(option[1]) # Catch the hdf5 file passed as the last argument filename = pargs[0] if sqlite_version == "2": import sqlite else: from pysqlite2 import dbapi2 as sqlite if verbose: print("pysqlite version:", sqlite.version) if userandom: print("using random values") if docreate: if verbose: print("writing %s krows" % nrows) if psyco_imported and usepsyco: psyco.bind(create_db) nrows *= 1000 create_db(filename, nrows) if createindex: index_db(filename) if doquery: query_db(filename, rng)
col_i = numpy.arange(start, stop, dtype=numpy.int32) if userandom: col_j = numpy.random.uniform(0, nrows, stop - start) else: col_j = numpy.array(col_i, dtype=numpy.float64) return col_i, col_j
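The create_db function above relies on feeding cursor.executemany from a generator so that rows are produced lazily instead of being materialized in memory. Here is a self-contained sketch of the same pattern, written against the standard-library sqlite3 module rather than the legacy sqlite/pysqlite2 modules the script imports; the row source and names are illustrative assumptions.

import sqlite3
from time import perf_counter as clock

def int_rows(nrows):
    # Lazily yield (i, j) pairs; nothing is materialized up front.
    for i in range(nrows):
        yield (i, float(i))

def bench_insert(nrows=100000):
    con = sqlite3.connect(":memory:")
    cur = con.cursor()
    cur.execute("create table ints(i integer, j real)")
    t1 = clock()
    # executemany drains the generator row by row, like create_db above.
    cur.executemany("insert into ints(i,j) values (?,?)", int_rows(nrows))
    con.commit()
    elapsed = clock() - t1
    print("insert time: %.5f  Krows/s: %.5f" % (elapsed, nrows / 1000 / elapsed))
    con.close()

if __name__ == "__main__":
    bench_insert()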
rpl_sync.py
# # Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # """ This file contains features to check the data consistency in a replication topology (i.e., between the master and its slaves, or only slaves), providing synchronization features to perform the check over the (supposed) same data of a system with replication active (running). """ import re import sys from multiprocessing.pool import ThreadPool from mysql.utilities.command.dbcompare import diff_objects, get_common_objects from mysql.utilities.common.database import Database from mysql.utilities.common.messages import ERROR_USER_WITHOUT_PRIVILEGES from mysql.utilities.common.pattern_matching import convertSQL_LIKE2REGEXP from mysql.utilities.common.replication import (get_last_server_gtid, gtid_set_cardinality, gtid_set_union) from mysql.utilities.common.sql_transform import quote_with_backticks from mysql.utilities.common.topology import Topology from mysql.utilities.common.user import User from mysql.utilities.exception import UtilError # Regular expression to handle the server version format. _RE_VERSION_FORMAT = r'^(\d+\.\d+(\.\d+)*).*$' class
(object):
    """Class to manage the features of the replication synchronization
    checker.

    The RPLSynchronizer class is used to manage synchronization check between
    servers of a replication topology, namely between the master and its
    slaves or only between slaves. It provides functions to determine the
    slaves missing transactions (i.e., missing GTIDs) and check data
    consistency.
    """

    def __init__(self, master_cnx_dic, slaves_cnx_dic_lst, options):
        """Constructor.

        options[in]       dictionary of options (e.g., discover, timeouts,
                          verbosity).
        """
        self._verbosity = options.get('verbosity')
        self._rpl_timeout = options.get('rpl_timeout')
        self._checksum_timeout = options.get('checksum_timeout')
        self._interval = options.get('interval')
        self._rpl_topology = Topology(master_cnx_dic, slaves_cnx_dic_lst,
                                      options)
        self._slaves = self._rpl_topology.get_slaves_dict()

        # Set base server used as reference for comparisons.
        self._base_server = None
        self._base_server_key = None
        self._set_base_server()

        # Check user permissions to perform the consistency check.
        self._check_privileges()

        # Check usage of replication filters.
        self._master_rpl_filters = {}
        self._slaves_rpl_filters = {}
        self._check_rpl_filters()

    def _set_base_server(self):
        """Set the base server used for comparison in the internal state.

        Set the master if used or the first slave from the topology as the
        base server. The base server is the one used as a reference for
        comparison with the others.

        This method sets two instance variables: _base_server with the Server
        instance, and _base_server_key with the string identifying the server
        (format: 'host@port').

        Note: base server might need to be changed (set again) if it is
        removed from the topology for some reason (e.g. GTID disabled).
        """
        master = self._get_master()
        self._base_server = master if master \
            else self._rpl_topology.slaves[0]['instance']
        self._base_server_key = "{0}@{1}".format(self._base_server.host,
                                                 self._base_server.port)

    def _get_slave(self, slave_key):
        """Get the slave server instance for the specified key 'host@port'.

        This function retrieves the Server instance for a slave from the
        internal state by specifying the key that uniquely identifies it,
        i.e. 'host@port'.

        slave_key[in]     String with the format 'host@port' that uniquely
                          identifies a server.

        Returns a Server instance of the slave with the specified key value
        (i.e., 'host@port').
        """
        slave_dict = self._slaves[slave_key]
        return slave_dict['instance']

    def _get_master(self):
        """Get the master server instance.

        This function retrieves the Server instance of the master (in the
        replication topology).

        Returns a Server instance of the master.
        """
        return self._rpl_topology.master

    def _check_privileges(self):
        """Check required privileges to perform the synchronization check.

        This method checks if the users used for the master and slaves
        possess the required privileges to perform the synchronization check.
        More specifically, the following privileges are required:
            - on the master: SUPER or REPLICATION CLIENT, LOCK TABLES and
              SELECT;
            - on slaves: SUPER and SELECT.

        An exception is thrown if a user doesn't have enough privileges.
        """
        if self._verbosity:
            print("# Checking users permission to perform consistency check.\n"
                  "#")

        # Check privileges for master.
master_priv = [('SUPER', 'REPLICATION CLIENT'), ('LOCK TABLES',), ('SELECT',)] master_priv_str = "SUPER or REPLICATION CLIENT, LOCK TABLES and SELECT" if self._get_master(): server = self._get_master() user_obj = User(server, "{0}@{1}".format(server.user, server.host)) for any_priv_tuple in master_priv: has_privilege = any( [user_obj.has_privilege('*', '*', priv) for priv in any_priv_tuple] ) if not has_privilege: raise UtilError(ERROR_USER_WITHOUT_PRIVILEGES.format( user=server.user, host=server.host, port=server.port, operation='perform the synchronization check', req_privileges=master_priv_str )) # Check privileges for slaves. slave_priv = [('SUPER',), ('SELECT',)] slave_priv_str = "SUPER and SELECT" for slave_key in self._slaves: server = self._get_slave(slave_key) user_obj = User(server, "{0}@{1}".format(server.user, server.host)) for any_priv_tuple in slave_priv: has_privilege = any( [user_obj.has_privilege('*', '*', priv) for priv in any_priv_tuple] ) if not has_privilege: raise UtilError( "User '{0}' on '{1}@{2}' does not have sufficient " "privileges to perform the synchronization check " "(required: {3}).".format(server.user, server.host, server.port, slave_priv_str) ) def _check_rpl_filters(self): """Check usage of replication filters. Check the usage of replication filtering option on the master (if defined) and slaves, and set the internal state with the found options (to check later). """ # Get binlog filtering option for the master. if self._get_master(): m_filters = self._get_master().get_binlog_exceptions() if m_filters: # Set filtering option for master. self._master_rpl_filters['binlog_do_db'] = \ m_filters[0][1].split(',') if m_filters[0][1] else None self._master_rpl_filters['binlog_ignore_db'] = \ m_filters[0][2].split(',') if m_filters[0][2] else None # Get replication filtering options for each slave. for slave_key in self._slaves: slave = self._get_slave(slave_key) s_filters = slave.get_slave_rpl_filters() if s_filters: # Handle known server issues with some replication filters, # leading to inconsistent GTID sets. Sync not supported for # server with those issues. issues = [(0, 'replicate_do_db'), (1, 'replicate_ignore_db'), (4, 'replicate_wild_do_table')] for index, rpl_opt in issues: if s_filters[index]: raise UtilError( "Use of {0} option is not supported. There is a " "known issue with the use this replication filter " "and GTID for some server versions. Issue " "detected for '{1}'.".format(rpl_opt, slave_key)) # Set map (dictionary) with the slave filtering options. filters_map = { 'replicate_do_db': s_filters[0].split(',') if s_filters[0] else None, 'replicate_ignore_db': s_filters[1].split(',') if s_filters[1] else None, 'replicate_do_table': s_filters[2].split(',') if s_filters[2] else None, 'replicate_ignore_table': s_filters[3].split(',') if s_filters[3] else None, } # Handle wild-*-table filters differently to create # corresponding regexp. if s_filters[4]: wild_list = s_filters[4].split(',') filters_map['replicate_wild_do_table'] = wild_list # Create auxiliary list with compiled regexp to match. regexp_list = [] for wild in wild_list: regexp = re.compile(convertSQL_LIKE2REGEXP(wild)) regexp_list.append(regexp) filters_map['regexp_do_table'] = regexp_list else: filters_map['replicate_wild_do_table'] = None filters_map['regexp_do_table'] = None if s_filters[5]: wild_list = s_filters[5].split(',') filters_map['replicate_wild_ignore_table'] = wild_list # Create auxiliary list with compiled regexp to match. 
regexp_list = [] for wild in wild_list: regexp = re.compile(convertSQL_LIKE2REGEXP(wild)) regexp_list.append(regexp) filters_map['regexp_ignore_table'] = regexp_list else: filters_map['replicate_wild_ignore_table'] = None filters_map['regexp_ignore_table'] = None # Set filtering options for the slave. self._slaves_rpl_filters[slave_key] = filters_map # Print warning if filters are found. if self._master_rpl_filters or self._slaves_rpl_filters: print("# WARNING: Replication filters found on checked " "servers. This can lead data consistency issues " "depending on how statements are evaluated.\n" "# More information: " "http://dev.mysql.com/doc/en/replication-rules.html") if self._verbosity: # Print filter options in verbose mode. if self._master_rpl_filters: print("# Master '{0}@{1}':".format( self._get_master().host, self._get_master().port )) for rpl_filter in self._master_rpl_filters: if self._master_rpl_filters[rpl_filter]: print("# - {0}: {1}".format( rpl_filter, ', '.join( self._master_rpl_filters[rpl_filter] ) )) if self._slaves_rpl_filters: for slave_key in self._slaves_rpl_filters: print("# Slave '{0}':".format(slave_key)) filters_map = self._slaves_rpl_filters[slave_key] for rpl_filter in filters_map: if (rpl_filter.startswith('replicate') and filters_map[rpl_filter]): print("# - {0}: {1}".format( rpl_filter, ', '.join(filters_map[rpl_filter]) )) def _is_rpl_filtered(self, db_name, tbl_name=None, slave=None): """ Check if the given object is to be filtered by replication. This method checks if the given database or table name is supposed to be filtered by replication (i.e., not replicated), according to the defined replication filters for the master or the specified slave. db_name[in] Name of the database to check (not backtick quoted) or associated to the table to check.. tbl_name[in] Name of the table to check (not backtick quoted). Table level filtering rules are only checked if this value is not None. By default None, meaning that only the database level rules are checked. slave[in] Identification of the slave in the format 'host@port' to check, determining which filtering rules will be checked. If None only the master filtering rules are checked, otherwise the rule of the specified slaves are used. By default: None. Returns a boolean value indicating if the given database or table is supposed to be filtered by the replication or not. More precisely, if True then updates associated to the object are (supposedly) not replicated, otherwise they are replicated. """ def match_regexp(name, regex_list): """ Check if 'name' matches one of the regex in the given list. """ for regex in regex_list: if regex.match(name): return True return False # Determine object to check and set full qualified name. is_db = tbl_name is None obj_name = db_name if is_db else '{0}.{1}'.format(db_name, tbl_name) # Match replication filter for Master. if not slave and is_db and self._master_rpl_filters: if self._master_rpl_filters['binlog_do_db']: if obj_name in self._master_rpl_filters['binlog_do_db']: return False else: return True elif self._master_rpl_filters['binlog_ignore_db']: if obj_name in self._master_rpl_filters['binlog_ignore_db']: return True # Match replication filters for the specified slave. 
if slave and slave in self._slaves_rpl_filters: rpl_filter = self._slaves_rpl_filters[slave] if is_db: if rpl_filter['replicate_do_db']: if obj_name in rpl_filter['replicate_do_db']: return False else: return True elif (rpl_filter['replicate_ignore_db'] and obj_name in rpl_filter['replicate_ignore_db']): return True else: if (rpl_filter['replicate_do_table'] and obj_name in rpl_filter['replicate_do_table']): return False if (rpl_filter['replicate_ignore_table'] and obj_name in rpl_filter['replicate_ignore_table']): return True if (rpl_filter['replicate_wild_do_table'] and match_regexp(obj_name, rpl_filter['regexp_do_table'])): return False if (rpl_filter['replicate_wild_ignore_table'] and match_regexp(obj_name, rpl_filter['regexp_ignore_table'])): return True if (rpl_filter['replicate_do_table'] or rpl_filter['replicate_wild_do_table']): return True # Do not filter replication for object (if no filter rule matched). return False def _apply_for_all_slaves(self, slaves, function, args=(), kwargs=None, multithreading=False): """Apply specified function to all given slaves. This function allow the execution (concurrently or not) of the specified function with the given arguments on all the specified slaves. slaves[in] List of slaves to apply the function. It is assumed that the list is composed by strings with the format 'host@port', identifying each slave. function[in] Name of the function (string) to apply on all slaves. args[in] Tuple with all the function arguments (except keyword arguments). kwargs[in] Dictionary with all the function keyword arguments. multithreading[in] Boolean value indicating if the function will be applied concurrently on all slaves. By default False, no concurrency. Return a list of tuples composed by two elements: a string identifying the slave ('host@port') and the result of the execution of the target function for the corresponding slave. """ if kwargs is None: kwargs = {} if multithreading: # Create a pool of threads to execute the method for each slave. pool = ThreadPool(processes=len(slaves)) thread_res_lst = [] for slave_key in slaves: slave = self._get_slave(slave_key) thread_res = pool.apply_async(getattr(slave, function), args, kwargs) thread_res_lst.append((slave_key, thread_res)) pool.close() # Wait for all threads to finish here to avoid RuntimeErrors when # waiting for the result of a thread that is already dead. pool.join() # Get the result from each slave and return the results. res = [] for slave_key, thread_res in thread_res_lst: res.append((slave_key, thread_res.get())) return res else: res = [] for slave_key in slaves: slave = self._get_slave(slave_key) slave_res = getattr(slave, function)(*args, **kwargs) res.append((slave_key, slave_res)) return res def check_server_versions(self): """Check server versions. Check all server versions and report version differences. """ srv_versions = {} # Get the server version of the master if used. master = self._get_master() if master: master_version = master.get_version() match = re.match(_RE_VERSION_FORMAT, master_version.strip()) if match: # Add .0 as release version if not provided. if not match.group(2): master_version = "{0}.0".format(match.group(1)) else: master_version = match.group(1) master_id = '{0}@{1}'.format(master.host, master.port) # Store the master version. srv_versions[master_version] = [master_id] # Get the server version for all slaves. 
for slave_key in self._slaves: slave = self._get_slave(slave_key) version = slave.get_version() match = re.match(_RE_VERSION_FORMAT, version.strip()) if match: # Add .0 as release version if not provided. if not match.group(2): version = "{0}.0".format(match.group(1)) else: version = match.group(1) # Store the slave version. if version in srv_versions: srv_versions[version].append(slave_key) else: srv_versions[version] = [slave_key] # Check the servers versions and issue a warning if different. if len(srv_versions) > 1: print("# WARNING: Servers using different versions:") for version in srv_versions: servers_str = ",".join(srv_versions[version]) print("# - {0} for {1}.".format(version, servers_str)) print("#") def check_gtid_sync(self): """Check GTIDs synchronization. Perform several GTID checks (enabled and errant transactions). If the master is available (was specified) then it also checks if GTIDs are in sync between master and its slaves and report the amount of transaction (i.e., GTIDs) behind the master for each slave. GTID differences might be an indicator of the existence of data consistency issues. Note: The master may not be specified, its use is not mandatory. """ # Check if GTIDs are enabled on the topology. if self._get_master(): # Use of Master is not mandatory. # GTIDs must be enabled on the master. if self._get_master().supports_gtid().upper() != 'ON': raise UtilError( "Master must support GTIDs and have GTID_MODE=ON." ) # Skip slaves without GTID enabled and warn user. reset_base_srv = False for slave_key, slave_dict in self._slaves.items(): slave = slave_dict['instance'] support_gtid = slave.supports_gtid().upper() if support_gtid != 'ON': reason = "GTID_MODE=OFF" if support_gtid == 'OFF' \ else "not support GTIDs" print("# WARNING: Slave '{0}' will be skipped - " "{1}.".format(slave_key, reason)) print("#") del self._slaves[slave_key] self._rpl_topology.remove_slave(slave_dict) if slave_key == self._base_server_key: reset_base_srv = True # At least on slave must have GTIDs enabled. if len(self._slaves) == 0: raise UtilError("No slaves found with GTID support and " "GTID_MODE=ON.") # Reset base server if needed (it must have GTID_MODE=ON). if reset_base_srv: self._set_base_server() # Check the set of executed GTIDs and report differences, only if the # master is specified. if self._get_master(): master_gtids = self._get_master().get_gtid_executed() slaves_gtids_data = \ self._rpl_topology.slaves_gtid_subtract_executed( master_gtids, multithreading=True ) print("#\n# GTID differences between Master and Slaves:") for host, port, gtids_missing in slaves_gtids_data: slave_key = '{0}@{1}'.format(host, port) gtid_size = gtid_set_cardinality(gtids_missing) if gtid_size: plural = 's' if gtid_size > 1 else '' print("# - Slave '{0}' is {1} transaction{2} behind " "Master.".format(slave_key, gtid_size, plural)) if self._verbosity: print("# Missing GTIDs: " "{0}".format(gtids_missing)) else: print("# - Slave '{0}' is up-to-date.".format(slave_key)) print("#") @staticmethod def _exist_in_obj_list(obj_name, obj_type, obj_list): """Check if object (name and type) exists in the given list. This function checks if the database object for the specified name and type exists in the specified list of database objects. obj_name[in] Name of the object to check. obj_type[in] Type of the object to check. obj_list[in] List of objects to check. It is assumed that the list has the format of the ones returned by the function mysql.utilities.command.dbcompare.get_common_objects(). 
                          More precisely with the format:
                          [(obj_type1, (obj_name1,))..(obj_typeN, (obj_nameN,))]

        Returns a boolean value indicating if the object with the specified
        name and type exists in the specified list of objects.
        """
        for obj_row in obj_list:
            if obj_row[0] == obj_type and obj_row[1][0] == obj_name:
                return True
        return False

    def _split_active_slaves(self, slaves):
        """Get the lists of slaves with replication running and not running.

        This method separates the list of given slaves into active (with the
        IO and SQL thread running) and non-active slaves (with one of the
        threads stopped).

        slaves[in]  List of target slaves to separate.

        Returns a tuple with two elements, first with the list of active
        slaves and the second with the list of not active ones.
        """
        # Get slaves status.
        slaves_state = self._apply_for_all_slaves(slaves, 'get_slaves_errors',
                                                  multithreading=True)
        # Store IO and SQL thread status.
        active_slaves = []
        not_active_slaves = []
        for slave_key, state in slaves_state:
            # Locally store IO and SQL threads status.
            io_running = state[3].upper() == 'YES'
            self._slaves[slave_key]['IO_Running'] = io_running
            sql_running = state[4].upper() == 'YES'
            self._slaves[slave_key]['SQL_Running'] = sql_running
            if io_running and sql_running:
                active_slaves.append(slave_key)
            else:
                not_active_slaves.append(slave_key)
                print("# WARNING: Slave not active '{0}' - "
                      "Sync skipped.".format(slave_key))
                if self._verbosity:
                    # Print warning if slave is stopped due to an error.
                    if not io_running and state[2]:
                        print("# - IO thread stopped: ERROR {0} - "
                              "{1}".format(state[1], state[2]))
                    if not sql_running and state[6]:
                        print("# - SQL thread stopped: ERROR {0} - "
                              "{1}".format(state[5], state[6]))
        # Return separated list of active and non-active replication slaves.
        return active_slaves, not_active_slaves

    def _compute_sync_point(self, active_slaves=None):
        """Compute the GTID synchronization point.

        This method computes the GTID synchronization point based on the
        GTID_EXECUTED set. If a master is available for synchronization the
        last GTID from the GTID_EXECUTED set is used as the sync point. If no
        master is available the union of the GTID_EXECUTED sets among all
        active slaves is used as the sync point.

        active_slaves[in]   List of active slaves to consider. Only required
                            if the master is not available. It is assumed
                            that the list is composed by strings with the
                            format 'host@port', identifying each slave.

        Return a GTID set representing the synchronization point (to wait for
        slaves to catch up and stop).
        """
        if self._get_master():
            gtid_set = self._get_master().get_gtid_executed()
            master_uuid = self._get_master().get_server_uuid()
            return get_last_server_gtid(gtid_set, master_uuid)
        else:
            # Get GTID_EXECUTED on all slaves.
            all_gtid_executed = self._apply_for_all_slaves(
                active_slaves, 'get_gtid_executed', multithreading=True
            )

            # Compute the union of all GTID sets for each UUID among slaves.
            gtid_sets_by_uuid = {}
            for _, gtid_executed in all_gtid_executed:
                gtids_list = gtid_executed.split("\n")
                for gtid in gtids_list:
                    gtid_set = gtid.rstrip(', ')
                    uuid = gtid_set.split(':')[0]
                    if uuid not in gtid_sets_by_uuid:
                        gtid_sets_by_uuid[uuid] = gtid_set
                    else:
                        union_set = gtid_set_union(gtid_sets_by_uuid[uuid],
                                                   gtid_set)
                        gtid_sets_by_uuid[uuid] = union_set

            # Return union of all known executed GTIDs.
            return ",".join(gtid_sets_by_uuid.itervalues())

    def _sync_slaves(self, slaves, gtid):
        """Set synchronization point (specified GTID set) for the given slaves.
        The method sets the synchronization point for the given slaves by
        (concurrently) stopping and immediately executing START SLAVE UNTIL
        on all given slaves in order to stop upon reaching the given GTID
        set (i.e., committing all corresponding transactions for the given
        GTID sync point).

        slaves[in]  List of target slaves to synchronize (i.e., instruct to
                    stop upon reaching the synchronization point).
        gtid[in]    GTID set used as the synchronization point.
        """
        # Make running slaves stop until sync point (GTID) is reached.
        if self._verbosity:
            print("# Setting data synchronization point for slaves.")
        # STOP slave (only SQL thread).
        self._apply_for_all_slaves(slaves, 'stop_sql_thread',
                                   multithreading=True)
        # START slave UNTIL sync point is reached.
        # Note: Only the SQL thread is stopped when the condition is reached.
        until_ops = {'until_gtid_set': gtid, 'sql_after_gtid': True,
                     'only_sql_thread': True}
        self._apply_for_all_slaves(slaves, 'start', (), until_ops,
                                   multithreading=True)

    def _checksum_and_resume_rpl(self, not_sync_slaves, sync_slave, table):
        """Checksum table and resume replication on slaves.

        This method computes (concurrently) the table checksum of the given
        slaves lists (those synced and not synced). For the list of not
        synced slaves the table checksum is immediately computed. For the
        list of synced slaves, first it waits for them to catch up to the
        sync point and only then compute the table checksum and resume
        replication.

        not_sync_slaves[in]  List of not synced slaves.
        sync_slave[in]       List of (previously) synced slaves.
        table[in]            Target table to compute the checksum.

        Returns a list of tuples, each tuple containing the identification of
        the server and the corresponding checksum result.
        """
        if self._verbosity:
            print("# Compute checksum on slaves (wait to catch up and resume"
                  " replication).")
            sys.stdout.flush()
        not_sync_checksum = []
        if not_sync_slaves:
            not_sync_checksum = self._apply_for_all_slaves(
                not_sync_slaves, 'checksum_table', (table,),
                {'exec_timeout': self._checksum_timeout},
                multithreading=True
            )
        sync_checksum = []
        if sync_slave:
            sync_checksum = self._apply_for_all_slaves(
                sync_slave, 'wait_checksum_and_start', (table,),
                {'wait_timeout': self._rpl_timeout,
                 'wait_interval': self._interval,
                 'checksum_timeout': self._checksum_timeout},
                multithreading=True
            )
        return not_sync_checksum + sync_checksum

    def _check_table_data_sync(self, table, slaves):
        """Check table data synchronization for specified slaves.

        This method checks the data consistency for the specified table
        between the base server (master or slave) and the specified slaves.
        This operation requires the definition of a "synchronization point"
        in order to ensure that the (supposed) same data is compared between
        servers. This coordination process is based on GTIDs (checking that
        all data until a given GTID has been processed on the slaves). A
        different algorithm is used to set the "synchronization point"
        depending if the master is used or not.

        The data consistency is checked relying on the CHECKSUM TABLE query.

        If an error occurs during this process, any locked table must be
        unlocked and both master and slaves should resume their previous
        activity.

        Important note: this method assumes that the table exists on the base
        server and all specified slaves, therefore checking the existence of
        the table as well as other integrity checks (server versions, GTID
        definitions, etc.) need to be performed outside the scope of this
        method.

        table[in]   Qualified name of the table to check (quoted with
                    backticks).
        slaves[in]  List of slaves to check.
                    Each element of the list must be a string with the format
                    'host@port'.

        Returns the number of data consistency issues found.
        """
        success = False
        checksum_issues = 0

        # If no master used then add base server (slave) to slaves to sync.
        if not self._get_master():
            slaves = slaves + [self._base_server_key]

        # Separate active from non active slaves.
        active_slaves, not_active_slaves = self._split_active_slaves(slaves)

        if self._get_master():
            # Lock the table on the master to get GTID synchronization point
            # and perform the table checksum.
            try:
                self._get_master().exec_query(
                    "LOCK TABLES {0} READ".format(table)
                )
                last_exec_gtid = self._compute_sync_point()
                if self._verbosity > 2:
                    print("# Sync point GTID: {0}".format(last_exec_gtid))
                # Immediately instruct active slaves to stop on sync point.
                if active_slaves:
                    self._sync_slaves(active_slaves, last_exec_gtid)
                # Perform table checksum on master.
                base_server_checksum = self._get_master().checksum_table(
                    table, self._checksum_timeout
                )
                if base_server_checksum[0]:
                    success = True  # Successful checksum for base server.
                    if self._verbosity > 2:
                        print("# Checksum on base server (Master): "
                              "{0}".format(base_server_checksum[0][1]))
                else:
                    print("# [SKIP] {0} checksum on base server (Master) - "
                          "{1}".format(table, base_server_checksum[1]))
            finally:
                # Unlock table.
                self._get_master().exec_query("UNLOCK TABLES")
        elif active_slaves:
            # Perform sync without master, only based on active slave (if any).
            try:
                # Stop all active slaves to get the GTID synchronization point.
                self._apply_for_all_slaves(
                    active_slaves, 'stop_sql_thread', multithreading=True
                )
                sync_gtids = self._compute_sync_point(active_slaves)
                if self._verbosity > 2:
                    print("# Sync point GTID: {0}".format(sync_gtids))
                # Instruct active slaves to stop on sync point.
                self._sync_slaves(active_slaves, sync_gtids)
            except UtilError:
                # Try to restart the slaves in case an error occurs.
                self._apply_for_all_slaves(
                    active_slaves, 'start_sql_thread', multithreading=True
                )

        # Compute checksum on all slaves and return to previous state.
        slaves_checksum = self._checksum_and_resume_rpl(not_active_slaves,
                                                        active_slaves, table)

        # Check if checksum for base server was successfully computed.
        if not self._get_master():
            for slave_key, checksum in slaves_checksum:
                if slave_key == self._base_server_key:
                    if checksum[0]:
                        success = True  # Successful checksum for base server.
                        base_server_checksum = checksum
                        slaves_checksum.remove((slave_key, checksum))
                        if self._verbosity > 2:
                            print("# Checksum on base server: "
                                  "{0}".format(base_server_checksum[0][1]))
                    else:
                        print("# [SKIP] {0} checksum on base server - "
                              "{1}".format(table, checksum[1]))
                    break

        # Compare checksum and report results.
        if success and slaves_checksum:
            for slave_key, checksum_res in slaves_checksum:
                if checksum_res[0] is None:
                    print("# [SKIP] {0} checksum for Slave '{1}' - "
                          "{2}.".format(table, slave_key, checksum_res[1]))
                else:
                    if self._verbosity > 2:
                        checksum_val = ': {0}'.format(checksum_res[0][1])
                    else:
                        checksum_val = ''
                    if checksum_res[0] != base_server_checksum[0]:
                        print("# [DIFF] {0} checksum for server '{1}'"
                              "{2}.".format(table, slave_key, checksum_val))
                        checksum_issues += 1
                    else:
                        print("# [OK] {0} checksum for server '{1}'"
                              "{2}.".format(table, slave_key, checksum_val))

        return checksum_issues

    def check_data_sync(self, options, data_to_include, data_to_exclude):
        """Check data synchronization.

        Check if the data (in all tables) is in sync between the checked
        servers (master and its slaves, or only slaves).
It reports structure difference database/tables missing or with a different definition and data differences between a base server and the others. Note: A different algorithm is applied to perform the synchronization, depending if the master is specified (available) or not. options[in] Dictionary of options. data_to_include[in] Dictionary of data (set of tables) by database to check. data_to_exclude[in] Dictionary of data (set of tables) by database to exclude from check. Returns the number of consistency issues found (comparing database definitions and data). """ issues_count = 0 # Skip all database objects, except tables. options['skip_views'] = True options['skip_triggers'] = True options['skip_procs'] = True options['skip_funcs'] = True options['skip_events'] = True options['skip_grants'] = True diff_options = {} diff_options.update(options) diff_options['quiet'] = True # Do not print messages. diff_options['suppress_sql'] = True # Do not print SQL statements. diff_options['skip_table_opts'] = True # Ignore AUTO_INCREMENT diffs. # Check the server version requirement to support sync features. # Slave servers of version >= 5.6.14 are required due to a known issue # for START SLAVE UNTIL with the SQL_AFTER_GTIDS option. More info: # https://dev.mysql.com/doc/refman/5.6/en/start-slave.html for slave_key in self._slaves: if not self._get_slave(slave_key).check_version_compat(5, 6, 14): raise UtilError( "Server '{0}' version must be 5.6.14 or greater. Sync is " "not supported for versions prior to 5.6.14 due to a " "known issue with START SLAVE UNTIL and the " "SQL_AFTER_GTIDS option.".format(slave_key)) print("# Checking data consistency.\n#") base_srv_type = 'Master' if self._get_master() else 'Slave' print("# Using {0} '{1}' as base server for comparison." "".format(base_srv_type, self._base_server_key)) # Get all databases from the base server. db_rows = self._base_server.get_all_databases() base_server_dbs = set([row[0] for row in db_rows]) # Process databases to include/exclude from check. db_to_include = set() if data_to_include: db_to_include = set([db for db in data_to_include]) base_server_dbs = base_server_dbs & db_to_include not_exist_db = db_to_include - base_server_dbs if not_exist_db: plurals = ('s', '') if len(not_exist_db) > 1 else ('', 'es') print('# WARNING: specified database{0} to check do{1} not ' 'exist on base server and will be skipped: ' '{2}.'.format(plurals[0], plurals[1], ", ".join(not_exist_db))) db_to_exclude = set() if data_to_exclude: db_to_exclude = set( [db for db in data_to_exclude if not data_to_exclude[db]] ) base_server_dbs = base_server_dbs - db_to_exclude # Check databases on slaves (except the base server). slaves_except_base = [key for key in self._slaves if key != self._base_server_key] for slave_key in slaves_except_base: slave = self._get_slave(slave_key) db_rows = slave.get_all_databases() slave_dbs = set([row[0] for row in db_rows]) # Process databases to include/exclude. if db_to_include: slave_dbs = slave_dbs & db_to_include if db_to_exclude: slave_dbs = slave_dbs - db_to_exclude # Add slave databases set to internal state. self._slaves[slave_key]['databases'] = slave_dbs # Report databases not on base server and filtered by replication. 
dbs_not_in_base_srv = slave_dbs - base_server_dbs filtered_dbs = set( [db for db in dbs_not_in_base_srv if self._is_rpl_filtered(db, slave=self._base_server_key)] ) dbs_not_in_base_srv -= filtered_dbs for db in filtered_dbs: print("# [SKIP] Database '{0}' - filtered by replication " "rule on base server.".format(db)) if dbs_not_in_base_srv: issues_count += len(dbs_not_in_base_srv) plural = 's' if len(dbs_not_in_base_srv) > 1 else '' print("# [DIFF] Database{0} NOT on base server but found on " "'{1}': {2}".format(plural, slave_key, ",".join(dbs_not_in_base_srv))) # Determine server to check base replication filtering options. filter_srv = None if self._get_master() else self._base_server_key # Check data consistency for each table on the base server. for db_name in base_server_dbs: # Skip database if filtered by defined replication rules. if self._is_rpl_filtered(db_name, slave=filter_srv): print("# [SKIP] Database '{0}' check - filtered by " "replication rule.".format(db_name)) continue print("# Checking '{0}' database...".format(db_name)) slaves_to_check = {} # Check if database exists on slaves (except the base server). for slave_key in slaves_except_base: # Skip database if filtered by defined replication rules. if self._is_rpl_filtered(db_name, slave=slave_key): print("# [SKIP] Database '{0}' check for '{1}' - filtered " "by replication rule.".format(db_name, slave_key)) continue if db_name in self._slaves[slave_key]['databases']: # Store slave database instance and common objects. slave_db = Database(self._get_slave(slave_key), db_name, options) slave_db.init() slave_dic = {'db': slave_db} in_both, in_basesrv, not_in_basesrv = get_common_objects( self._base_server, self._get_slave(slave_key), db_name, db_name, False, options) # Process tables to include/exclude from check (on slaves). if (data_to_include and db_name in data_to_include and data_to_include[db_name]): in_both = [ obj_row for obj_row in in_both if obj_row[1][0] in data_to_include[db_name] ] in_basesrv = [ obj_row for obj_row in in_basesrv if obj_row[1][0] in data_to_include[db_name] ] not_in_basesrv = [ obj_row for obj_row in not_in_basesrv if obj_row[1][0] in data_to_include[db_name] ] if (data_to_exclude and db_name in data_to_exclude and data_to_exclude[db_name]): in_both = [ obj_row for obj_row in in_both if obj_row[1][0] not in data_to_exclude[db_name] ] in_basesrv = [ obj_row for obj_row in in_basesrv if obj_row[1][0] not in data_to_exclude[db_name] ] not_in_basesrv = [ obj_row for obj_row in not_in_basesrv if obj_row[1][0] not in data_to_exclude[db_name] ] slave_dic['in_both'] = in_both slave_dic['in_basesrv'] = in_basesrv slaves_to_check[slave_key] = slave_dic # Report tables not on base server and filtered by # replication. tbls_not_in = set( [obj_row[1][0] for obj_row in not_in_basesrv if obj_row[0] == 'TABLE'] ) filtered_tbls = set( [tbl for tbl in tbls_not_in if self._is_rpl_filtered( db_name, tbl_name=tbl, slave=self._base_server_key )] ) tbls_not_in -= filtered_tbls for tbl in filtered_tbls: print("# [SKIP] Table '{0}' - filtered by replication " "rule on base server.".format(tbl)) if tbls_not_in: plural = 's' if len(tbls_not_in) > 1 else '' print("# [DIFF] Table{0} NOT on base server but " "found on '{1}': " "{2}".format(plural, slave_key, ", ".join(tbls_not_in))) issues_count += len(tbls_not_in) else: print("# [DIFF] Database '{0}' NOT on server " "'{1}'.".format(db_name, slave_key)) issues_count += 1 # Only check database if at least one slave has it. 
if slaves_to_check: db = Database(self._base_server, db_name, options) db.init() for db_obj in db.get_next_object(): obj_type = db_obj[0] obj_name = db_obj[1][0] # Process tables to include/exclude from check (on base # server). if (data_to_include and data_to_include[db_name] and obj_name not in data_to_include[db_name]): # Skip to the next object if not in data to include. continue if (data_to_exclude and data_to_exclude[db_name] and obj_name in data_to_exclude[db_name]): # Skip to the next object if in data to exclude. continue checksum_task = [] # Check object data on all valid slaves. for slave_key in slaves_to_check: # Skip table if filtered by defined replication rules. if (obj_type == 'TABLE' and self._is_rpl_filtered(db_name, obj_name, slave=slave_key)): print("# [SKIP] Table '{0}' check for '{1}' - " "filtered by replication rule." "".format(obj_name, slave_key)) continue slave_dic = slaves_to_check[slave_key] # Check if object does not exist on Slave. if self._exist_in_obj_list(obj_name, obj_type, slave_dic['in_basesrv']): print("# [DIFF] {0} '{1}.{2}' NOT on server " "'{3}'.".format(obj_type.capitalize(), db_name, obj_name, slave_key)) issues_count += 1 continue # Quote object name with backticks. q_obj = '{0}.{1}'.format( quote_with_backticks(db_name), quote_with_backticks(obj_name) ) # Check object definition. def_diff = diff_objects( self._base_server, self._get_slave(slave_key), q_obj, q_obj, diff_options, obj_type ) if def_diff: print("# [DIFF] {0} {1} definition is " "different on '{2}'." "".format(obj_type.capitalize(), q_obj, slave_key)) issues_count += 1 if self._verbosity: for diff in def_diff[3:]: print("# {0}".format(diff)) continue # Add slave to table checksum task. checksum_task.append(slave_key) # Perform table checksum on valid slaves. if checksum_task and obj_type == 'TABLE': print("# - Checking '{0}' table data..." "".format(obj_name)) num_issues = self._check_table_data_sync(q_obj, checksum_task) issues_count += num_issues print("#\n#...done.\n#") str_issues_count = 'No' if issues_count == 0 else str(issues_count) plural = 's' if issues_count > 1 else '' print("# SUMMARY: {0} data consistency issue{1} found.\n" "#".format(str_issues_count, plural)) return issues_count
RPLSynchronizer
asgi.py
""" ASGI config for CongoCart project.
It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""

import os

from django.core.asgi import get_asgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CongoCart.settings')

application = get_asgi_application()
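# Usage sketch (an assumption, not part of this project's files: any ASGI
# server can serve the callable above; uvicorn is one common choice):
#
#   uvicorn CongoCart.asgi:application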
version_adjuster.rs
use crate::imp::json_to_rust::validation::validate_root::validate_root;
use crate::imp::version_adjuster::adjust_mut_list::adjust_mut_list;
//use crate::{HashM, HashMt};
use crate::error::CoreResult;
use crate::imp::json_to_rust::names::Names;
use crate::imp::structs::root_obj::RootObject;
use crate::imp::structs::root_value::RootValue;
use crate::imp::structs::root_sab_value::RootSabValue;
use std::sync::Arc;

// If a param has a sabun (diff), overwrite with it; for mut_list, take all of the
// old items and, if the new side has any, delete them all and swap in the old ones.
// Basically there is a new-version json and old-version data, and the old-version
// data will have had changes applied to the Root's sabun and MutList.
// Because Default gets updated, undefined is set.
/// Adjust old data to be compatible with the new version.
/// The strategy of the adjustment is described in the manual.
pub fn adjust_versions(new : RootObject, old : RootObject, validation : bool) -> CoreResult<RootObject>{
    let (def, mut sabun_v, old_hash, meta) = new.deconstruct();
    let sabun = Arc::make_mut(&mut sabun_v);
    //let mut new_map :HashM<String, (usize, RootValue)> = HashMt::with_capacity(def.len());
    let (old_def,
         mut old_sabun, _, _) = old.deconstruct();
    let old_sabun = Arc::make_mut(&mut old_sabun);

    for (def_key, (_id, def_value)) in def.def(){
        match def_value{
            RootValue::Param(p,v) =>{
                let undef = if v.undefiable(){
                    if old_def.contains_key(def_key) == false{
                        sabun.insert(def_key.to_string(),RootSabValue::Param(p.to_undefined()));
                        true
                    } else { false }
                } else{ false };
                if undef == false {
                    if let Some(param) = old_sabun.remove(def_key) {
                        sabun.insert(def_key.to_string(), param);
                    }
                }
            },
            RootValue::MList(m) =>{
                let undef = if m.undefiable(){
                    if old_def.contains_key(def_key) == false{
                        sabun.insert(def_key.to_string(),RootSabValue::Mut(None));
                        true
                    } else { false }
                } else{ false };
                if undef == false {
                    if let Some(RootSabValue::Mut(m_val)) = old_sabun.remove(def_key) {
                        if let Some(m_val) = m_val {
                            let new_m = adjust_mut_list(m.default(), m_val, &Names::new(def_key))?;
                            sabun.insert(def_key.to_string(),RootSabValue::Mut(Some(new_m)));
                        } else{
                            sabun.insert(def_key.to_string(),RootSabValue::Mut(None));
                        }
                    }
                }
            },
            _ =>{
                // Nothing other than Mut and Param should need adjusting.
            },
        }
    }
    let new = RootObject::construct(def, sabun_v, old_hash, meta);
    if validation{
        validate_root(&new, true)?
    }
    return Ok(new);
}
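// Usage sketch (hypothetical caller; `new_root` and `old_root` stand in for
// RootObject values obtained elsewhere, e.g. deserialized from json):
//
// let merged: RootObject = adjust_versions(new_root, old_root, true)?;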
IVoteModel.ts
import Mongoose = require("mongoose");
interface IVoteModel extends Mongoose.Document {
    voteId: String;
    postId: String;
    userId: String;
    voteValue: Number;
    timestamp: Date; // same format as comment model
}

export {IVoteModel};
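// Usage sketch (hypothetical schema; the field shapes mirror IVoteModel above):
//
// const VoteSchema = new Mongoose.Schema({
//     voteId: String, postId: String, userId: String,
//     voteValue: Number, timestamp: Date,
// });
// const Vote = Mongoose.model<IVoteModel>("Vote", VoteSchema);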
filter_test.go
/* Copyright IBM Corp. 2016 All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package broadcastfilter import ( "testing" cb "github.com/hyperledger/fabric/protos/common" ) var RejectRule = Rule(rejectRule{}) type rejectRule struct{} func (r rejectRule) Apply(message *cb.Envelope) Action { return Reject } var ForwardRule = Rule(forwardRule{}) type forwardRule struct{} func (r forwardRule) Apply(message *cb.Envelope) Action { return Forward } func TestEmptyRejectRule(t *testing.T) { rs := NewRuleSet([]Rule{EmptyRejectRule}) result, rule := rs.Apply(&cb.Envelope{}) if result != Reject { t.Fatalf("Should have rejected") } if rule != EmptyRejectRule { t.Fatalf("Rejected but not for the right rule") } result, _ = rs.Apply(&cb.Envelope{Payload: []byte("fakedata")}) if result != Forward { t.Fatalf("Should have forwarded") } } func TestAcceptReject(t *testing.T) { rs := NewRuleSet([]Rule{AcceptRule, RejectRule})
	result, rule := rs.Apply(&cb.Envelope{})
	if result != Accept {
		t.Fatalf("Should have accepted")
	}
	if rule != AcceptRule {
		t.Fatalf("Accepted but not for the right rule")
	}
}

func TestRejectAccept(t *testing.T) {
	rs := NewRuleSet([]Rule{RejectRule, AcceptRule})
	result, rule := rs.Apply(&cb.Envelope{})
	if result != Reject {
		t.Fatalf("Should have rejected")
	}
	if rule != RejectRule {
		t.Fatalf("Rejected but not for the right rule")
	}
}

func TestForwardAccept(t *testing.T) {
	rs := NewRuleSet([]Rule{ForwardRule, AcceptRule})
	result, rule := rs.Apply(&cb.Envelope{})
	if result != Accept {
		t.Fatalf("Should have accepted")
	}
	if rule != AcceptRule {
		t.Fatalf("Accepted but not for the right rule")
	}
}

func TestForward(t *testing.T) {
	rs := NewRuleSet([]Rule{ForwardRule})
	result, rule := rs.Apply(&cb.Envelope{})
	if result != Forward {
		t.Fatalf("Should have forwarded")
	}
	if rule != nil {
		t.Fatalf("Forwarded but rule is set")
	}
}

func TestNoRule(t *testing.T) {
	rs := NewRuleSet([]Rule{})
	result, rule := rs.Apply(&cb.Envelope{})
	if result != Forward {
		t.Fatalf("Should have forwarded")
	}
	if rule != nil {
		t.Fatalf("Forwarded but rule is set")
	}
}
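// Usage sketch (hypothetical, mirroring the API exercised above): rules are
// evaluated in order, so a chain can fall through to a terminal AcceptRule.
//
// rs := NewRuleSet([]Rule{EmptyRejectRule, AcceptRule})
// result, _ := rs.Apply(&cb.Envelope{Payload: []byte("data")})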
test_logging.py
import logging import os import unittest import pypesto import pypesto.logging class LoggingTest(unittest.TestCase): def test_optimize(self): # logging pypesto.logging.log_to_console(logging.WARN) filename = ".test_logging.tmp" pypesto.logging.log_to_file(logging.DEBUG, filename) logger = logging.getLogger('pypesto') if os.path.exists(filename): os.remove(filename) fh = logging.FileHandler(filename) fh.setLevel(logging.DEBUG) logger.addHandler(fh) logger.info("start test") # problem definition def fun(_): raise Exception("This function cannot be called.") objective = pypesto.Objective(fun=fun) problem = pypesto.Problem(objective, -1, 1) optimizer = pypesto.ScipyOptimizer() options = {'allow_failed_starts': True} # optimization pypesto.minimize(problem, optimizer, 5, options=options) # assert logging worked self.assertTrue(os.path.exists(filename)) f = open(filename, 'rb') content = str(f.read()) f.close() # tidy up os.remove(filename)
# check if error message got inserted self.assertTrue("fail" in content) if __name__ == '__main__': suite = unittest.TestSuite() suite.addTest(LoggingTest()) unittest.main()
parser.rs
//! Parser for FuTIL programs.
use super::ast::{self, BitNum, NumType};
use crate::errors::{self, FutilResult, Span};
use crate::ir;
use pest::prec_climber::{Assoc, Operator, PrecClimber};
use pest_consume::{match_nodes, Error, Parser};
use std::collections::HashMap;
use std::fs;
use std::io::Read;
use std::path::PathBuf;
use std::rc::Rc;

type ParseResult<T> = Result<T, Error<Rule>>;

// user data is the input program so that we can create ir::Id's
// that have a reference to the input string
type Node<'i> = pest_consume::Node<'i, Rule, Rc<String>>;

// include the grammar file so that Cargo knows to rebuild this file on grammar changes
const _GRAMMAR: &str = include_str!("futil_syntax.pest");

// Define the precedence of binary operations. We use `lazy_static` so that
// this is only ever constructed once.
lazy_static::lazy_static! {
    static ref PRECCLIMBER: PrecClimber<Rule> = PrecClimber::new(
        vec![
            // loosest binding
            Operator::new(Rule::guard_or, Assoc::Left),
            Operator::new(Rule::guard_and, Assoc::Left),
            Operator::new(Rule::guard_leq, Assoc::Left),
            Operator::new(Rule::guard_geq, Assoc::Left),
            Operator::new(Rule::guard_lt, Assoc::Left),
            Operator::new(Rule::guard_gt, Assoc::Left),
            Operator::new(Rule::guard_eq, Assoc::Left),
            Operator::new(Rule::guard_neq, Assoc::Left),
            Operator::new(Rule::guard_not, Assoc::Right)
            // tightest binding
        ]
    );
}

#[derive(Parser)]
#[grammar = "frontend/futil_syntax.pest"]
pub struct FutilParser;

impl FutilParser {
    /// Parse a FuTIL program into an AST representation.
    pub fn parse_file(path: &PathBuf) -> FutilResult<ast::NamespaceDef> {
        let content = &fs::read(path).map_err(|err| {
            errors::Error::InvalidFile(format!(
                "Failed to read {}: {}",
                path.to_string_lossy(),
                err.to_string()
            ))
        })?;
        let string_content = std::str::from_utf8(content)?;
        let inputs = FutilParser::parse_with_userdata(
            Rule::file,
            string_content,
            Rc::new(string_content.to_string()),
        )?;
        let input = inputs.single()?;
        Ok(FutilParser::file(input)?)
    }

    pub fn parse<R: Read>(mut r: R) -> FutilResult<ast::NamespaceDef> {
        let mut buf = String::new();
        r.read_to_string(&mut buf).map_err(|err| {
            errors::Error::InvalidFile(format!(
                "Failed to parse buffer: {}",
                err.to_string()
            ))
        })?;
        let inputs = FutilParser::parse_with_userdata(
            Rule::file,
            &buf,
            Rc::new(buf.to_string()),
        )?;
        let input = inputs.single()?;
        Ok(FutilParser::file(input)?)
} } #[pest_consume::parser] impl FutilParser { fn EOI(_input: Node) -> ParseResult<()> { Ok(()) } fn identifier(input: Node) -> ParseResult<ir::Id> { Ok(ir::Id::new( input.as_str(), Some(Span::new(input.as_span(), Rc::clone(input.user_data()))), )) } fn bitwidth(input: Node) -> ParseResult<u64> { Ok(match input.as_str().parse::<u64>() { Ok(x) => x, _ => panic!("Unable to parse '{}' as a u64", input.as_str()), }) } fn num_lit(input: Node) -> ParseResult<BitNum> { let raw = input.as_str(); if raw.contains("'d") { match raw.split("'d").collect::<Vec<_>>().as_slice() { [bits, val] => Ok(BitNum { width: bits.parse().unwrap(), num_type: NumType::Decimal, val: val.parse().unwrap(), span: Some(Span::new( input.as_span(), Rc::clone(input.user_data()), )), }), _ => unreachable!(), } } else if raw.contains("'b") { match raw.split("'b").collect::<Vec<_>>().as_slice() { [bits, val] => Ok(BitNum { width: bits.parse().unwrap(), num_type: NumType::Binary, val: u64::from_str_radix(val, 2).unwrap(), span: Some(Span::new( input.as_span(), Rc::clone(input.user_data()), )), }), _ => unreachable!(), } } else if raw.contains("'x") { match raw.split("'x").collect::<Vec<_>>().as_slice() { [bits, val] => Ok(BitNum { width: bits.parse().unwrap(), num_type: NumType::Hex, val: u64::from_str_radix(val, 16).unwrap(), span: Some(Span::new( input.as_span(), Rc::clone(input.user_data()), )), }), _ => unreachable!(), } } else if raw.contains("'o") { match raw.split("'o").collect::<Vec<_>>().as_slice() { [bits, val] => Ok(BitNum { width: bits.parse().unwrap(), num_type: NumType::Octal, val: u64::from_str_radix(val, 8).unwrap(), span: Some(Span::new( input.as_span(), Rc::clone(input.user_data()), )), }), _ => unreachable!(), } } else { unreachable!() } } fn char(input: Node) -> ParseResult<&str> { Ok(input.as_str()) } fn string_lit(input: Node) -> ParseResult<String> { Ok(match_nodes!( input.into_children(); [char(c)..] => c.collect::<Vec<_>>().join("") )) } fn signature(input: Node) -> ParseResult<ast::Signature> { Ok(match_nodes!( input.into_children(); [io_ports(inputs), signature_return(outputs)] => ast::Signature { inputs, outputs }, [io_ports(inputs)] => ast::Signature { inputs, outputs: vec![] }, [signature_return(outputs)] => ast::Signature { inputs: vec![], outputs }, [] => ast::Signature { inputs: vec![], outputs: vec![] } )) } fn signature_return(input: Node) -> ParseResult<Vec<ast::Portdef>> { Ok(match_nodes!( input.into_children(); [io_ports(p)] => p, [] => vec![] )) } fn io_port(input: Node) -> ParseResult<ast::Portdef> { Ok(match_nodes![ input.into_children(); [identifier(id), bitwidth(bw)] => ast::Portdef { name: id, width: bw }]) } fn io_ports(input: Node) -> ParseResult<Vec<ast::Portdef>> { Ok(match_nodes![ input.into_children(); [io_port(p)..] => p.collect()]) } fn args(input: Node) -> ParseResult<Vec<u64>> { Ok(match_nodes!( input.into_children(); [bitwidth(bw)..] 
=> bw.collect(), [] => vec![] )) } fn primitive_cell(input: Node) -> ParseResult<ast::Cell> { Ok(match_nodes!( input.into_children(); [identifier(id), identifier(prim), args(args)] => ast::Cell::prim(id, prim, args) )) } fn component_cell(input: Node) -> ParseResult<ast::Cell> { Ok(match_nodes!( input.into_children(); [identifier(id), identifier(name)] => ast::Cell::decl(id, name) )) } fn cells(input: Node) -> ParseResult<Vec<ast::Cell>> { input .into_children() .map(|node| match node.as_rule() { Rule::primitive_cell => Self::primitive_cell(node), Rule::component_cell => Self::component_cell(node), _ => unreachable!(), }) .collect() } fn port(input: Node) -> ParseResult<ast::Port> { Ok(match_nodes!( input.into_children(); [identifier(component), identifier(port)] => ast::Port::Comp { component, port }, [identifier(port)] => ast::Port::This { port } )) } fn hole(input: Node) -> ParseResult<ast::Port> { Ok(match_nodes!( input.into_children(); [identifier(group), identifier(name)] => ast::Port::Hole { group, name } )) } fn LHS(input: Node) -> ParseResult<ast::Port> { Ok(match_nodes!( input.into_children(); [port(port)] => port, [hole(hole)] => hole )) } fn expr(input: Node) -> ParseResult<ast::Atom> { Ok(match_nodes!( input.into_children(); [LHS(port)] => ast::Atom::Port(port), [num_lit(num)] => ast::Atom::Num(num) )) } fn guard_not(_input: Node) -> ParseResult<()> { Ok(()) } #[prec_climb(term, PRECCLIMBER)] fn guard_expr( l: ast::GuardExpr, op: Node, r: ast::GuardExpr, ) -> ParseResult<ast::GuardExpr> { match op.as_rule() { Rule::guard_eq => Ok(ast::GuardExpr::Eq(Box::new(l), Box::new(r))), Rule::guard_neq => { Ok(ast::GuardExpr::Neq(Box::new(l), Box::new(r))) } Rule::guard_leq => { Ok(ast::GuardExpr::Leq(Box::new(l), Box::new(r))) } Rule::guard_geq => { Ok(ast::GuardExpr::Geq(Box::new(l), Box::new(r))) } Rule::guard_lt => Ok(ast::GuardExpr::Lt(Box::new(l), Box::new(r))), Rule::guard_gt => Ok(ast::GuardExpr::Gt(Box::new(l), Box::new(r))), Rule::guard_or => Ok(ast::GuardExpr::Or(vec![l, r])), Rule::guard_and => Ok(ast::GuardExpr::And(vec![l, r])), _ => unreachable!(), } } fn term(input: Node) -> ParseResult<ast::GuardExpr> { Ok(match_nodes!( input.into_children(); [guard_expr(guard)] => guard, [expr(e)] => ast::GuardExpr::Atom(e), [guard_not(_), guard_expr(e)] => ast::GuardExpr::Not(Box::new(e)), [guard_not(_), expr(e)] => ast::GuardExpr::Not(Box::new(ast::GuardExpr::Atom(e))) )) } fn switch_stmt(input: Node) -> ParseResult<ast::Guard> { Ok(match_nodes!( input.into_children(); [guard_expr(guard), expr(expr)] => ast::Guard { guard: Some(guard), expr }, )) } fn wire(input: Node) -> ParseResult<ast::Wire> { Ok(match_nodes!( input.into_children(); [LHS(dest), expr(expr)] => ast::Wire { src: ast::Guard { guard: None, expr }, dest }, [LHS(dest), switch_stmt(src)] => ast::Wire { src, dest } )) } fn key_value(input: Node) -> ParseResult<(String, u64)> { Ok(match_nodes!( input.into_children(); [string_lit(key), bitwidth(num)] => (key, num) )) } fn attributes(input: Node) -> ParseResult<HashMap<String, u64>> { Ok(match_nodes!( input.into_children(); [key_value(kvs)..] => kvs.collect() )) } fn group(input: Node) -> ParseResult<ast::Group> { Ok(match_nodes!( input.into_children(); [identifier(name), attributes(attrs), wire(wire)..] => ast::Group { name, attributes: attrs, wires: wire.collect() }, [identifier(name), wire(wire)..] 
=> ast::Group { name, attributes: HashMap::new(), wires: wire.collect() } )) } fn connections(input: Node) -> ParseResult<Vec<ast::Connection>> { input .into_children() .map(|node| match node.as_rule() { Rule::wire => Ok(ast::Connection::Wire(Self::wire(node)?)), Rule::group => Ok(ast::Connection::Group(Self::group(node)?)), _ => unreachable!(), }) .collect() } fn enable(input: Node) -> ParseResult<ast::Control> { Ok(match_nodes!( input.into_children(); [identifier(name)] => ast::Control::Enable { comp: name } )) } fn seq(input: Node) -> ParseResult<ast::Control> { Ok(match_nodes!( input.into_children(); [stmt(stmt)..] => ast::Control::Seq { stmts: stmt.collect() } )) } fn par(input: Node) -> ParseResult<ast::Control> { Ok(match_nodes!( input.into_children(); [stmt(stmt)..] => ast::Control::Par { stmts: stmt.collect() } )) }
fn if_stmt(input: Node) -> ParseResult<ast::Control> { Ok(match_nodes!( input.into_children(); [port(port), identifier(cond), stmt(stmt)] => ast::Control::If { port, cond, tbranch: Box::new(stmt), fbranch: Box::new(ast::Control::Empty{}) }, [port(port), identifier(cond), stmt(tbranch), stmt(fbranch)] => ast::Control::If { port, cond, tbranch: Box::new(tbranch), fbranch: Box::new(fbranch) }, [port(port), identifier(cond), stmt(tbranch), if_stmt(fbranch)] => ast::Control::If { port, cond, tbranch: Box::new(tbranch), fbranch: Box::new(fbranch) }, )) } fn while_stmt(input: Node) -> ParseResult<ast::Control> { Ok(match_nodes!( input.into_children(); [port(port), identifier(cond), stmt(stmt)] => ast::Control::While { port, cond, body: Box::new(stmt), } )) } fn stmt(input: Node) -> ParseResult<ast::Control> { Ok(match_nodes!( input.into_children(); [enable(data)] => data, [seq(data)] => data, [par(data)] => data, [if_stmt(data)] => data, [while_stmt(data)] => data, )) } fn control(input: Node) -> ParseResult<ast::Control> { Ok(match_nodes!( input.into_children(); [stmt(stmt)] => stmt, [] => ast::Control::Empty{} )) } fn component(input: Node) -> ParseResult<ast::ComponentDef> { Ok(match_nodes!( input.into_children(); [identifier(id), signature(sig), cells(cells), connections(connections), control(control)] => ast::ComponentDef { name: id, signature: sig, cells, connections, control, }, [identifier(id), cells(cells), connections(connections), control(control)] => ast::ComponentDef { name: id, signature: ast::Signature { inputs: vec![], outputs: vec![] }, cells, connections, control, }, )) } fn imports(input: Node) -> ParseResult<Vec<String>> { Ok(match_nodes!( input.into_children(); [string_lit(path)..] => path.collect() )) } fn file(input: Node) -> ParseResult<ast::NamespaceDef> { Ok(match_nodes!( input.into_children(); [imports(imports), component(comps).., EOI] => ast::NamespaceDef { libraries: imports, components: comps.collect() } )) } }
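// Usage sketch (hypothetical inputs; both entry points are defined above):
//
// let ns = FutilParser::parse_file(&PathBuf::from("example.futil"))?;
// let ns = FutilParser::parse(std::io::stdin())?;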
env.py
# type: ignore import os from logging.config import fileConfig from alembic import context from sqlalchemy import engine_from_config, pool from learning.entities import Base # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel target_metadata = Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = os.environ["DB_URL"] context.configure( url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations()
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, url=os.environ["ALEMBIC_DB_URL"], ) with connectable.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online()
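# Usage sketch (env var names taken from this file; the standard alembic CLI
# is assumed to be installed):
#
#   ALEMBIC_DB_URL=postgresql://... alembic upgrade head         # online mode
#   DB_URL=postgresql://... alembic upgrade head --sql           # offline mode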
routes_test.py
import os from pathlib import Path import unittest import json from starlette.testclient import TestClient from ml_base.utilities import ModelManager os.chdir(Path(__file__).resolve().parent.parent.parent) os.environ["REST_CONFIG"] = "examples/rest_config.yaml" from rest_model_service.main import app, create_app from rest_model_service.configuration import Model class RoutesTests(unittest.TestCase): def test_root(self): # arrange client = TestClient(app) # act response = client.get("/") # assert self.assertTrue(response.status_code == 200) # cleanup model_manager = ModelManager() model_manager.clear_instance() def test_get_models(self): # arrange client = TestClient(app) # act response = client.get("/api/models") # assert self.assertTrue(response.status_code == 200) self.assertTrue(response.json() == { "models": [ { "display_name": "Iris Model", "qualified_name": "iris_model", "description": "Model for predicting the species of a flower based on its measurements.", "version": "1.0.0" } ] }) # cleanup model_manager = ModelManager() model_manager.clear_instance() def test_prediction(self): # arrange client = TestClient(app) # act response = client.post("/api/models/iris_model/prediction", data=json.dumps({ "sepal_length": 6.0, "sepal_width": 5.0, "petal_length": 3.0, "petal_width": 2.0 })) # assert self.assertTrue(response.status_code == 200) self.assertTrue(response.json() == { "species": "Iris setosa" }) # cleanup model_manager = ModelManager() model_manager.clear_instance() def test_prediction_with_bad_data(self): # arrange
        app = create_app("REST Model Service",
                         [Model(qualified_name="iris_model",
                                class_path="tests.mocks.IrisModel",
                                create_endpoint=True)])
        client = TestClient(app)

        # act
        response = client.post("/api/models/iris_model/prediction", data=json.dumps({
            "sepal_length": 16.0,
            "sepal_width": 5.0,
            "petal_length": 3.0,
            "petal_width": 2.0
        }))

        # assert
        self.assertTrue(response.status_code == 422)

        # cleanup
        model_manager = ModelManager()
        model_manager.clear_instance()

    def test_prediction_with_bad_configuration(self):
        # arrange, act, assert
        with self.assertRaises(ValueError) as e:
            app = create_app("REST Model Service",
                             [Model(qualified_name="asdf",
                                    class_path="tests.mocks.IrisModel",
                                    create_endpoint=True)])

        # cleanup
        model_manager = ModelManager()
        model_manager.clear_instance()

    def test_prediction_with_no_endpoint(self):
        # arrange
        app = create_app("REST Model Service",
                         [Model(qualified_name="iris_model",
                                class_path="tests.mocks.IrisModel",
                                create_endpoint=False)])
        client = TestClient(app)

        # act
        response = client.post("/api/models/iris_model/prediction", data=json.dumps({
            "sepal_length": 16.0,
            "sepal_width": 5.0,
            "petal_length": 3.0,
            "petal_width": 2.0
        }))

        # assert
        self.assertTrue(response.status_code == 404)

        # cleanup
        model_manager = ModelManager()
        model_manager.clear_instance()


if __name__ == '__main__':
    unittest.main()
timeout.py
# (C) Datadog, Inc. 2010-2016 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) from threading import Thread import functools _thread_by_func = {} class TimeoutException(Exception): """ Raised when a function runtime exceeds the limit set. """ pass class ThreadMethod(Thread): """ Descendant of `Thread` class. Run the specified target method with the specified arguments. Store result and exceptions. From: https://code.activestate.com/recipes/440569/ """ def __init__(self, target, args, kwargs): Thread.__init__(self) self.setDaemon(True) self.target, self.args, self.kwargs = target, args, kwargs self.start() def run(self): try: self.result = self.target(*self.args, **self.kwargs) except Exception as e: self.exception = e else: self.exception = None def timeout(timeout): """ A decorator to timeout a function. Decorated method calls are executed in a separate new thread with a specified timeout. Also check if a thread for the same function already exists before creating a new one. Note: Compatible with Windows (thread based). """ def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)

            if key in _thread_by_func:
                # A thread for the same function already exists.
                worker = _thread_by_func[key]
            else:
                worker = ThreadMethod(func, args, kwargs)
                _thread_by_func[key] = worker

            worker.join(timeout)
            if worker.is_alive():
                raise TimeoutException()

            del _thread_by_func[key]

            if worker.exception:
                raise worker.exception
            else:
                return worker.result

        return wrapper
    return decorator
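# Usage sketch (hypothetical function; grounded in the decorator above):
#
# @timeout(5)
# def slow_query():
#     ...  # long-running work
#
# Calling slow_query() raises TimeoutException once the 5-second limit passes.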
story-meta.ts
/**
 * MIT License
 *
 * Copyright © 2019 ADAM Timothée, BOUILLON Pierre, VARNIER Victor
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * @summary Wrapper for the story "meta" data
 */
export interface IStoryMeta {
    /**
     * @param author The story's author
     */
    author: string;

    /**
     * @param overview The story's description
     */
    overview: string;

    /**
     * @param tags The story's tags
     */
    tags: Array<string>;

    /**
     * @param title The story's title
     */
    title: string;
}

/**
 * @summary Concrete implementation of the wrapper for the story "meta" data
 */
export class StoryMeta implements IStoryMeta {
    /**
     * @summary Default constructor
     * @param author The story's author
     * @param overview The story's description
     * @param tags The story's tags
     * @param title The story's title
     */
    constructor(
        public author: string,
        public overview: string,
        public tags: Array<string>,
        public title: string,
    ) { }
}
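// Usage sketch (hypothetical values; argument order follows the constructor above):
//
// const meta: IStoryMeta = new StoryMeta(
//     "Jane Doe", "A short overview", ["fantasy"], "A Title");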
dd_board_logger.py
# -*- coding:utf-8 -*- import json, os from shutil import rmtree from datetime import datetime from tensorboard_logger import configure as tbl_configure, log_value as tbl_log_value class DDBoard: """Version 0.4 Converts logs to "TensorBoard compatible" data.""" # Default values base_dir = "/opt/tensorboard/runs" # useful to have a default? #~ flush_time = 2 # Really needed? tb apparently refreshes/flushes its cache after each event written. See at the end of the __init__ part. def __init__(self, base_dir, sub_dir, del_dir = False): """ - base_dir = string, general cache directory used by tensorboard - sub_dir = string, subdirectory of the current run used by tensorboard
        - del_dir = boolean, False if omitted. If set to False, the new graph
          is displayed after the preceding one, if any. If set to True, the
          tensorboard cache directory will be deleted and the new graph will
          be the only one to appear.
        - flush_time = interval between 2 flushes of tb cache.
        """
        if base_dir != "":
            self.base_dir = base_dir
        else:
            self.base_dir = DDBoard.base_dir
        if sub_dir != "":
            self.sub_dir = sub_dir
        else:
            self.sub_dir = "/run-" + datetime.now().strftime('%y%m%d-%H%M%S')
        self.run_dir = self.base_dir + "/" + self.sub_dir
        if (del_dir):
            if (os.path.isdir(self.run_dir)):
                rmtree(self.run_dir)  # cleaning of the tb run directory

        #~ if ft != "":  # To be deleted if tb flushtime is finally not taken into account
            #~ self.flush_time = ft
        #~ else:
            #~ self.flush_time = DDBoard.flush_time
        #~ tbl_configure(self.run_dir, flush_secs=DDBoard.flush_time)
        tbl_configure(self.run_dir)

    def ddb_logger(self, obs):
        """obs = the Python dict (aka JSON object) to be analyzed."""
        for key in obs.keys():
            if (key != "iteration"):
                value = obs[key]
                if isinstance(value, float):
                    tbl_log_value(key, obs[key], int(obs["iteration"]))

    def ddb_logger_file(self, json_file):
        """json_file = the json file to be analyzed"""
        json_src = open(json_file, 'r')
        # Should we check the existence of the JSon source?
        for line in json_src:
            json_line = json.loads(line)
            self.ddb_logger(json_line)
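# Usage sketch (hypothetical paths; grounded in the class above):
#
# board = DDBoard("/opt/tensorboard/runs", "experiment-1", del_dir=True)
# board.ddb_logger({"iteration": 1, "train_loss": 0.42})
# board.ddb_logger_file("training_log.json")  # one JSON object per line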
build_docs.py
""" Build the README.rst either locally, or on Github for tagged commits. Usage: build_docs.py <TRAVIS_REPO_SLUG> build_docs.py (-h | --help) The TRAVIS_REPO_SLUG has following Format : "github_username/github_repository" Options: -h --help Show this screen """ # STDLIB import datetime import errno import logging import sys from typing import Dict # ext from docopt import docopt # type: ignore # OWN import lib_log_utils # type: ignore import rst_include # type: ignore # CONSTANTS & PROJECT SPECIFIC FUNCTIONS codeclimate_link_hash = "7d130ff52f3b507552ad" # for lib_detect_encoding def
(repository_slug: str, repository: str, repository_dashed: str) -> None: # PROJECT SPECIFIC logger = logging.getLogger('project_specific') pass def main(args: Dict[str, str]) -> None: logger = logging.getLogger('build_docs') logger.info('create the README.rst') travis_repo_slug = args['<TRAVIS_REPO_SLUG>'] repository = travis_repo_slug.split('/')[1] repository_dashed = repository.replace('_', '-') project_specific(travis_repo_slug, repository, repository_dashed) """ paths absolute, or relative to the location of the config file the notation for relative files is like on windows or linux - not like in python. so You might use ../../some/directory/some_document.rst to go two levels back. avoid absolute paths since You never know where the program will run. """ logger.info('include the include blocks') rst_include.rst_inc(source='./.docs/README_template.rst', target='./README.rst') logger.info('replace repository related strings') rst_include.rst_str_replace(source='./README.rst', target='', str_pattern='{repository_slug}', str_replace=travis_repo_slug, inplace=True) rst_include.rst_str_replace(source='./README.rst', target='', str_pattern='{repository}', str_replace=repository, inplace=True) rst_include.rst_str_replace(source='./README.rst', target='', str_pattern='{repository_dashed}', str_replace=repository_dashed, inplace=True) rst_include.rst_str_replace(source='./README.rst', target='', str_pattern='{last_update_yyyy}', str_replace=str(datetime.date.today().year + 1), inplace=True) rst_include.rst_str_replace(source='./README.rst', target='', str_pattern='{codeclimate_link_hash}', str_replace=codeclimate_link_hash, inplace=True) logger.info('done') sys.exit(0) if __name__ == '__main__': if sys.version_info < (3, 6): lib_log_utils.log_error('only Python Versions from 3.6 are supported') sys.exit(1) lib_log_utils.log_handlers.set_stream_handler() main_logger = logging.getLogger('main') try: _args = docopt(__doc__) main(_args) except FileNotFoundError: # see https://www.thegeekstuff.com/2010/10/linux-error-codes for error codes sys.exit(errno.ENOENT) # No such file or directory except FileExistsError: sys.exit(errno.EEXIST) # File exists except TypeError: sys.exit(errno.EINVAL) # Invalid Argument except ValueError: sys.exit(errno.EINVAL) # Invalid Argument
project_specific
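# Usage sketch (slug format taken from the docopt string above):
#
#   python build_docs.py github_username/github_repository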
__init__.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import datetime
import decimal
import io
import uuid

from flask import current_app
from flask import json as _json
from flask import request
from sqlalchemy import types

import arrow

text_type = str


def _wrap_reader_for_text(fp, encoding):
    if isinstance(fp.read(0), bytes):
        fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
    return fp


def _wrap_writer_for_text(fp, encoding):
    try:
        fp.write('')
    except TypeError:
        fp = io.TextIOWrapper(fp, encoding)
    return fp


class JSONEncoder(_json.JSONEncoder):
    """Custom JSON encoder that will serialize more complex datatypes.

    This class adds support for the following datatypes:

    - ``phonenumbers.phonenumber.PhoneNumber``: This will be serialized to
      an E.164 phonenumber. This will only be run if ``phonenumbers`` is
      installed.
    - ``decimal.Decimal``: This will serialize to a pretty decimal number
      with no trailing zeros and no unnecessary values. For example:

      - 2.01 -> 2.01
      - 2.0 -> 2
      - 2.010 -> 2.01
      - 2.000 -> 2

    - ``arrow.Arrow``: This will be serialized to an ISO8601 datetime string
      with the offset included.
    - ``datetime.datetime``: This will be serialized to an ISO8601 datetime
      string with the offset included.
    - ``datetime.date``: This will be serialized to an ISO8601 date string.

    Extended from http://flask.pocoo.org/snippets/119.
    """

    def __init__(self, *args, **kwargs):
        super(JSONEncoder, self).__init__(*args, **kwargs)
        self.use_decimal = False

    def default(self, obj):
        """
        Encode individual objects into their JSON representation.

        This method is used by :class:`flask.json.JSONEncoder` to encode
        individual items in the JSON object.

        Args:
            obj (object): Any Python object we wish to convert to JSON.

        Returns:
            str: The stringified, valid JSON representation of our provided
                object.
        """
        if isinstance(obj, decimal.Decimal):
            obj = format(obj, 'f')
            str_digit = str(obj)
            return (str_digit.rstrip('0').rstrip('.')
                    if '.' in str_digit
                    else str_digit)
        elif isinstance(obj, types.TypeEngine):
            return str(obj)
        elif isinstance(obj, arrow.Arrow):
            return str(obj)

        if isinstance(obj, datetime.datetime):
            if obj.tzinfo:
                # eg: '2015-09-25T23:14:42.588601+00:00'
                return obj.isoformat('T')
            else:
                # No timezone present - assume UTC.
                # eg: '2015-09-25T23:14:42.588601Z'
                return obj.isoformat('T') + 'Z'

        if isinstance(obj, datetime.date):
            return obj.isoformat()
        elif isinstance(obj, uuid.UUID):
            return str(obj)

        try:
            return list(iter(obj))
        except TypeError:
            pass

        return super(JSONEncoder, self).default(obj)


def _dump_arg_defaults(kwargs):
    """Inject default arguments for dump functions."""
    if current_app:
        kwargs.setdefault('cls', current_app.json_encoder)

        if not current_app.config['JSON_AS_ASCII']:
            kwargs.setdefault('ensure_ascii', False)

        kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
    else:
        kwargs.setdefault('sort_keys', True)
        kwargs.setdefault('cls', JSONEncoder)


def dumps(obj, **kwargs):
    """Serialize ``obj`` to a JSON formatted ``str`` by using the
    application's configured encoder (:attr:`~flask.Flask.json_encoder`)
    if there is an application on the stack.

    This function can return ``unicode`` strings or ascii-only bytestrings by
    default which coerce into unicode strings automatically. That behavior by
    default is controlled by the ``JSON_AS_ASCII`` configuration variable and
    can be overridden by the simplejson ``ensure_ascii`` parameter.
""" _dump_arg_defaults(kwargs) encoding = kwargs.pop('encoding', None) rv = _json.dumps(obj, **kwargs) if encoding is not None and isinstance(rv, text_type): rv = rv.encode(encoding) return rv def dump(obj, fp, **kwargs): """Like :func:`dumps` but writes into a file object.""" _dump_arg_defaults(kwargs) encoding = kwargs.pop('encoding', None) if encoding is not None: fp = _wrap_writer_for_text(fp, encoding) _json.dump(obj, fp, **kwargs) def jsonify(*args, **kwargs): """ copied from the flask jsonify function with modifcations added """ indent = None separators = (',', ':') if current_app.config['JSONIFY_PRETTYPRINT_REGULAR']\ and not request.is_xhr: indent = 2 separators = (', ', ': ') if args and kwargs: raise TypeError( 'jsonify() behavior undefined when passed both args and kwargs') elif len(args) == 1: # single args are passed directly to dumps() data = args[0] else: data = args or kwargs return current_app.response_class( (dumps(data, indent=indent, separators=separators), '\n'), mimetype=current_app.config['JSONIFY_MIMETYPE'] )
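# Usage sketch (illustrating the Decimal handling documented above; outside an
# application context, dumps falls back to this module's JSONEncoder):
#
# dumps({'price': decimal.Decimal('2.010')})  # Decimal trimmed per the rule above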
__init__.py
from __future__ import unicode_literals
import logging
import os

# TODO: Remove entirely if you don't register GStreamer elements below
import pygst
pygst.require('0.10')

from mopidy import config, ext


__version__ = '0.1.0'

# TODO: If you need to log, use loggers named after the current Python module
logger = logging.getLogger(__name__)


class Extension(ext.Extension):

    dist_name = 'Mopidy-GPIO'
    ext_name = 'gpio'
    version = __version__

    def get_default_config(self):
        conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return config.read(conf_file)

    def get_config_schema(self):
        schema = super(Extension, self).get_config_schema()
        schema['playpin'] = config.Integer()
        return schema

    def setup(self, registry):
        from .frontend import GpioFrontend
        registry.add('frontend', GpioFrontend)
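# Usage sketch (hypothetical mopidy.conf section; only `playpin` is declared in
# the schema above, while `enabled` is the standard Mopidy extension switch):
#
# [gpio]
# enabled = true
# playpin = 17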
test_sql_commands.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import time from azure_devtools.scenario_tests import AllowLargeResponse from azure.cli.core.util import CLIError from azure.cli.testsdk.base import execute from azure.cli.testsdk.exceptions import CliTestError from azure.cli.testsdk import ( JMESPathCheck, JMESPathCheckExists, JMESPathCheckGreaterThan, NoneCheck, ResourceGroupPreparer, ScenarioTest, StorageAccountPreparer, TestCli, LiveScenarioTest) from azure.cli.testsdk.preparers import ( AbstractPreparer, SingleValueReplacer) from azure.cli.command_modules.sql.custom import ( ClientAuthenticationType, ClientType) from datetime import datetime, timedelta from time import sleep # Constants server_name_prefix = 'clitestserver' server_name_max_length = 63 class SqlServerPreparer(AbstractPreparer, SingleValueReplacer): def __init__(self, name_prefix=server_name_prefix, parameter_name='server', location='westus', admin_user='admin123', admin_password='SecretPassword123', resource_group_parameter_name='resource_group', skip_delete=True): super(SqlServerPreparer, self).__init__(name_prefix, server_name_max_length) self.location = location self.parameter_name = parameter_name self.admin_user = admin_user self.admin_password = admin_password self.resource_group_parameter_name = resource_group_parameter_name self.skip_delete = skip_delete def create_resource(self, name, **kwargs): group = self._get_resource_group(**kwargs) template = 'az sql server create -l {} -g {} -n {} -u {} -p {}' execute(TestCli(), template.format(self.location, group, name, self.admin_user, self.admin_password)) return {self.parameter_name: name} def remove_resource(self, name, **kwargs): if not self.skip_delete: group = self._get_resource_group(**kwargs) execute(TestCli(), 'az sql server delete -g {} -n {} --yes --no-wait'.format(group, name)) def _get_resource_group(self, **kwargs): try: return kwargs.get(self.resource_group_parameter_name) except KeyError: template = 'To create a sql server account a resource group is required. Please add ' \ 'decorator @{} in front of this storage account preparer.' 
raise CliTestError(template.format(ResourceGroupPreparer.__name__, self.resource_group_parameter_name)) class SqlServerMgmtScenarioTest(ScenarioTest): @ResourceGroupPreparer(parameter_name='resource_group_1') @ResourceGroupPreparer(parameter_name='resource_group_2') def test_sql_server_mgmt(self, resource_group_1, resource_group_2, resource_group_location): server_name_1 = self.create_random_name(server_name_prefix, server_name_max_length) server_name_2 = self.create_random_name(server_name_prefix, server_name_max_length) admin_login = 'admin123' admin_passwords = ['SecretPassword123', 'SecretPassword456'] loc = 'westeurope' user = admin_login # test create sql server with minimal required parameters server_1 = self.cmd('sql server create -g {} --name {} -l {} ' '--admin-user {} --admin-password {}' .format(resource_group_1, server_name_1, loc, user, admin_passwords[0]), checks=[ JMESPathCheck('name', server_name_1), JMESPathCheck('resourceGroup', resource_group_1), JMESPathCheck('administratorLogin', user), JMESPathCheck('identity', None)]).get_output_in_json() # test list sql server should be 1 self.cmd('sql server list -g {}'.format(resource_group_1), checks=[JMESPathCheck('length(@)', 1)]) # test update sql server self.cmd('sql server update -g {} --name {} --admin-password {} -i' .format(resource_group_1, server_name_1, admin_passwords[1]), checks=[ JMESPathCheck('name', server_name_1), JMESPathCheck('resourceGroup', resource_group_1), JMESPathCheck('administratorLogin', user), JMESPathCheck('identity.type', 'SystemAssigned')]) # test update without identity parameter, validate identity still exists # also use --id instead of -g/-n self.cmd('sql server update --id {} --admin-password {}' .format(server_1['id'], admin_passwords[0]), checks=[ JMESPathCheck('name', server_name_1), JMESPathCheck('resourceGroup', resource_group_1), JMESPathCheck('administratorLogin', user), JMESPathCheck('identity.type', 'SystemAssigned')]) # test create another sql server, with identity this time self.cmd('sql server create -g {} --name {} -l {} -i ' '--admin-user {} --admin-password {}' .format(resource_group_2, server_name_2, loc, user, admin_passwords[0]), checks=[ JMESPathCheck('name', server_name_2), JMESPathCheck('resourceGroup', resource_group_2), JMESPathCheck('administratorLogin', user), JMESPathCheck('identity.type', 'SystemAssigned')]) # test list sql server in that group should be 1 self.cmd('sql server list -g {}'.format(resource_group_2), checks=[JMESPathCheck('length(@)', 1)]) # test list sql server in the subscription should be at least 2 self.cmd('sql server list', checks=[JMESPathCheckGreaterThan('length(@)', 1)]) # test show sql server self.cmd('sql server show -g {} --name {}' .format(resource_group_1, server_name_1), checks=[ JMESPathCheck('name', server_name_1), JMESPathCheck('resourceGroup', resource_group_1), JMESPathCheck('administratorLogin', user)]) self.cmd('sql server show --id {}' .format(server_1['id']), checks=[ JMESPathCheck('name', server_name_1), JMESPathCheck('resourceGroup', resource_group_1), JMESPathCheck('administratorLogin', user)]) self.cmd('sql server list-usages -g {} -n {}' .format(resource_group_1, server_name_1), checks=[JMESPathCheck('[0].resourceName', server_name_1)]) # test delete sql server self.cmd('sql server delete --id {} --yes' .format(server_1['id']), checks=NoneCheck()) self.cmd('sql server delete -g {} --name {} --yes' .format(resource_group_2, server_name_2), checks=NoneCheck()) # test list sql server should be 0 self.cmd('sql server list -g 
{}'.format(resource_group_1), checks=[NoneCheck()]) class SqlServerFirewallMgmtScenarioTest(ScenarioTest): @ResourceGroupPreparer() @SqlServerPreparer() def test_sql_firewall_mgmt(self, resource_group, resource_group_location, server): rg = resource_group firewall_rule_1 = 'rule1' start_ip_address_1 = '0.0.0.0' end_ip_address_1 = '255.255.255.255' firewall_rule_2 = 'rule2' start_ip_address_2 = '123.123.123.123' end_ip_address_2 = '123.123.123.124' # allow_all_azure_ips_rule = 'AllowAllAzureIPs' # allow_all_azure_ips_address = '0.0.0.0' # test sql server firewall-rule create fw_rule_1 = self.cmd('sql server firewall-rule create --name {} -g {} --server {} ' '--start-ip-address {} --end-ip-address {}' .format(firewall_rule_1, rg, server, start_ip_address_1, end_ip_address_1), checks=[ JMESPathCheck('name', firewall_rule_1), JMESPathCheck('resourceGroup', rg), JMESPathCheck('startIpAddress', start_ip_address_1), JMESPathCheck('endIpAddress', end_ip_address_1)]).get_output_in_json() # test sql server firewall-rule show by group/server/name self.cmd('sql server firewall-rule show --name {} -g {} --server {}' .format(firewall_rule_1, rg, server), checks=[ JMESPathCheck('name', firewall_rule_1), JMESPathCheck('resourceGroup', rg), JMESPathCheck('startIpAddress', start_ip_address_1), JMESPathCheck('endIpAddress', end_ip_address_1)]) # test sql server firewall-rule show by id self.cmd('sql server firewall-rule show --id {}' .format(fw_rule_1['id']), checks=[ JMESPathCheck('name', firewall_rule_1), JMESPathCheck('resourceGroup', rg), JMESPathCheck('startIpAddress', start_ip_address_1), JMESPathCheck('endIpAddress', end_ip_address_1)]) # test sql server firewall-rule update by group/server/name self.cmd('sql server firewall-rule update --name {} -g {} --server {} ' '--start-ip-address {} --end-ip-address {}' .format(firewall_rule_1, rg, server, start_ip_address_2, end_ip_address_2), checks=[ JMESPathCheck('name', firewall_rule_1), JMESPathCheck('resourceGroup', rg), JMESPathCheck('startIpAddress', start_ip_address_2), JMESPathCheck('endIpAddress', end_ip_address_2)]) # test sql server firewall-rule update by id self.cmd('sql server firewall-rule update --id {} ' '--start-ip-address {}' .format(fw_rule_1['id'], start_ip_address_1), checks=[ JMESPathCheck('name', firewall_rule_1), JMESPathCheck('resourceGroup', rg), JMESPathCheck('startIpAddress', start_ip_address_1), JMESPathCheck('endIpAddress', end_ip_address_2)]) self.cmd('sql server firewall-rule update --name {} -g {} --server {} ' '--end-ip-address {}' .format(firewall_rule_1, rg, server, end_ip_address_1), checks=[ JMESPathCheck('name', firewall_rule_1), JMESPathCheck('resourceGroup', rg), JMESPathCheck('startIpAddress', start_ip_address_1), JMESPathCheck('endIpAddress', end_ip_address_1)]) # test sql server firewall-rule create another rule self.cmd('sql server firewall-rule create --name {} -g {} --server {} ' '--start-ip-address {} --end-ip-address {}' .format(firewall_rule_2, rg, server, start_ip_address_2, end_ip_address_2), checks=[ JMESPathCheck('name', firewall_rule_2), JMESPathCheck('resourceGroup', rg), JMESPathCheck('startIpAddress', start_ip_address_2), JMESPathCheck('endIpAddress', end_ip_address_2)]) # test sql server firewall-rule list self.cmd('sql server firewall-rule list -g {} --server {}' .format(rg, server), checks=[JMESPathCheck('length(@)', 2)]) # # test sql server firewall-rule create azure ip rule # self.cmd('sql server firewall-rule allow-all-azure-ips -g {} --server {} ' # .format(rg, server), checks=[ # 
JMESPathCheck('name', allow_all_azure_ips_rule), # JMESPathCheck('resourceGroup', rg), # JMESPathCheck('startIpAddress', allow_all_azure_ips_address), # JMESPathCheck('endIpAddress', allow_all_azure_ips_address)]) # # test sql server firewall-rule list # self.cmd('sql server firewall-rule list -g {} --server {}' # .format(rg, server), checks=[JMESPathCheck('length(@)', 3)]) # test sql server firewall-rule delete self.cmd('sql server firewall-rule delete --id {}' .format(fw_rule_1['id']), checks=NoneCheck()) self.cmd('sql server firewall-rule list -g {} --server {}' .format(rg, server), checks=[JMESPathCheck('length(@)', 1)]) self.cmd('sql server firewall-rule delete --name {} -g {} --server {}' .format(firewall_rule_2, rg, server), checks=NoneCheck()) self.cmd('sql server firewall-rule list -g {} --server {}' .format(rg, server), checks=[NoneCheck()]) class SqlServerDbMgmtScenarioTest(ScenarioTest): @ResourceGroupPreparer(location='eastus2') @SqlServerPreparer(location='eastus2') def test_sql_db_mgmt(self, resource_group, resource_group_location, server): database_name = "cliautomationdb01" database_name_2 = "cliautomationdb02" database_name_3 = "cliautomationdb03" update_service_objective = 'S1' update_storage = '10GB' update_storage_bytes = str(10 * 1024 * 1024 * 1024) rg = resource_group loc_display = 'East US 2' # test sql db commands db1 = self.cmd('sql db create -g {} --server {} --name {}' .format(rg, server, database_name), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('location', loc_display), JMESPathCheck('elasticPoolName', None), JMESPathCheck('status', 'Online'), JMESPathCheck('zoneRedundant', False)]).get_output_in_json() self.cmd('sql db list -g {} --server {}' .format(rg, server), checks=[ JMESPathCheck('length(@)', 2), JMESPathCheck('sort([].name)', sorted([database_name, 'master'])), JMESPathCheck('[0].resourceGroup', rg), JMESPathCheck('[1].resourceGroup', rg)]) self.cmd('sql db list-usages -g {} --server {} --name {}' .format(rg, server, database_name), checks=[JMESPathCheck('[0].resourceName', database_name)]) # Show by group/server/name self.cmd('sql db show -g {} --server {} --name {}' .format(rg, server, database_name), checks=[ JMESPathCheck('name', database_name), JMESPathCheck('resourceGroup', rg)]) # Show by id self.cmd('sql db show --id {}' .format(db1['id']), checks=[ JMESPathCheck('name', database_name), JMESPathCheck('resourceGroup', rg)]) # Update by group/server/name self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --max-size {}' ' --set tags.key1=value1' .format(rg, server, database_name, update_service_objective, update_storage), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('requestedServiceObjectiveName', update_service_objective), JMESPathCheck('maxSizeBytes', update_storage_bytes), JMESPathCheck('tags.key1', 'value1')]) # Update by id self.cmd('sql db update --id {} --set tags.key2=value2' .format(db1['id']), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('requestedServiceObjectiveName', update_service_objective), JMESPathCheck('maxSizeBytes', update_storage_bytes), JMESPathCheck('tags.key2', 'value2')]) # Rename by group/server/name db2 = self.cmd('sql db rename -g {} -s {} -n {} --new-name {}' .format(rg, server, database_name, database_name_2), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name_2)]).get_output_in_json() # Rename by id db3 = 
self.cmd('sql db rename --id {} --new-name {}' .format(db2['id'], database_name_3), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name_3)]).get_output_in_json() # Delete by group/server/name self.cmd('sql db delete -g {} --server {} --name {} --yes' .format(rg, server, database_name_3), checks=[NoneCheck()]) # Delete by id self.cmd('sql db delete --id {} --yes' .format(db3['id']), checks=[NoneCheck()]) class SqlServerDbOperationMgmtScenarioTest(ScenarioTest): @ResourceGroupPreparer(location='southeastasia') @SqlServerPreparer(location='southeastasia') def test_sql_db_operation_mgmt(self, resource_group, resource_group_location, server): database_name = "cliautomationdb01" update_service_objective = 'S1' # Create db self.cmd('sql db create -g {} -s {} -n {}' .format(resource_group, server, database_name), checks=[ JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('name', database_name), JMESPathCheck('status', 'Online')]) # Update DB with --no-wait self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --no-wait' .format(resource_group, server, database_name, update_service_objective)) # List operations ops = list( self.cmd('sql db op list -g {} -s {} -d {}' .format(resource_group, server, database_name, update_service_objective), checks=[ JMESPathCheck('length(@)', 1), JMESPathCheck('[0].resourceGroup', resource_group), JMESPathCheck('[0].databaseName', database_name) ]) .get_output_in_json()) # Cancel operation self.cmd('sql db op cancel -g {} -s {} -d {} -n {}' .format(resource_group, server, database_name, ops[0]['name'])) class SqlServerConnectionPolicyScenarioTest(ScenarioTest): @ResourceGroupPreparer() @SqlServerPreparer() def test_sql_server_connection_policy(self, resource_group, resource_group_location, server): # Show self.cmd('sql server conn-policy show -g {} -s {}' .format(resource_group, server), checks=[JMESPathCheck('connectionType', 'Default')]) # Update for type in ('Proxy', 'Default', 'Redirect'): self.cmd('sql server conn-policy update -g {} -s {} -t {}' .format(resource_group, server, type), checks=[JMESPathCheck('connectionType', type)]) class AzureActiveDirectoryAdministratorScenarioTest(LiveScenarioTest): # convert to ScenarioTest and re-record when ISSUE #6011 is fixed @ResourceGroupPreparer() @SqlServerPreparer() def test_aad_admin(self, resource_group, server): rg = resource_group sn = server oid = '5e90ef3b-9b42-4777-819b-25c36961ea4d' oid2 = 'e4d43337-d52c-4a0c-b581-09055e0359a0' user = 'DSEngAll' user2 = 'TestUser' self.cmd('sql server ad-admin create -s {} -g {} -i {} -u {}' .format(sn, rg, oid, user), checks=[JMESPathCheck('login', user), JMESPathCheck('sid', oid)]) self.cmd('sql server ad-admin list -s {} -g {}' .format(sn, rg), checks=[JMESPathCheck('[0].login', user)]) self.cmd('sql server ad-admin update -s {} -g {} -u {} -i {}' .format(sn, rg, user2, oid2), checks=[JMESPathCheck('login', user2), JMESPathCheck('sid', oid2)]) self.cmd('sql server ad-admin delete -s {} -g {}' .format(sn, rg)) self.cmd('sql server ad-admin list -s {} -g {}' .format(sn, rg), checks=[JMESPathCheck('login', None)]) class SqlServerDbCopyScenarioTest(ScenarioTest): @ResourceGroupPreparer(parameter_name='resource_group_1') @ResourceGroupPreparer(parameter_name='resource_group_2') @SqlServerPreparer(parameter_name='server1', resource_group_parameter_name='resource_group_1') @SqlServerPreparer(parameter_name='server2', resource_group_parameter_name='resource_group_2') def test_sql_db_copy(self, resource_group_1, resource_group_2, 
                         resource_group_location,
                         server1, server2):
        database_name = "cliautomationdb01"
        database_copy_name = "cliautomationdb02"
        service_objective = 'S1'

        rg = resource_group_1
        loc_display = 'West US'

        # create database
        self.cmd('sql db create -g {} --server {} --name {}'
                 .format(rg, server1, database_name),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', database_name),
                     JMESPathCheck('location', loc_display),
                     JMESPathCheck('elasticPoolName', None),
                     JMESPathCheck('status', 'Online')])

        # copy database to same server (min parameters)
        self.cmd('sql db copy -g {} --server {} --name {} '
                 '--dest-name {}'
                 .format(rg, server1, database_name, database_copy_name),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', database_copy_name)
                 ])

        # copy database to other server (max parameters)
        self.cmd('sql db copy -g {} --server {} --name {} '
                 '--dest-name {} --dest-resource-group {} --dest-server {} '
                 '--service-objective {}'
                 .format(rg, server1, database_name, database_copy_name,
                         resource_group_2, server2, service_objective),
                 checks=[
                     JMESPathCheck('resourceGroup', resource_group_2),
                     JMESPathCheck('name', database_copy_name),
                     JMESPathCheck('requestedServiceObjectiveName', service_objective)
                 ])


def _get_earliest_restore_date(db):
    return datetime.strptime(db['earliestRestoreDate'],
                             "%Y-%m-%dT%H:%M:%S.%f+00:00")


def _get_deleted_date(deleted_db):
    return datetime.strptime(deleted_db['deletionDate'],
                             "%Y-%m-%dT%H:%M:%S.%f+00:00")


def _create_db_wait_for_first_backup(test, rg, server, database_name):
    # create db
    db = test.cmd('sql db create -g {} --server {} --name {}'
                  .format(rg, server, database_name),
                  checks=[
                      JMESPathCheck('resourceGroup', rg),
                      JMESPathCheck('name', database_name),
                      JMESPathCheck('status', 'Online')]).get_output_in_json()

    # Wait until earliestRestoreDate is in the past. When run live, this will take at least
    # 10 minutes. Unfortunately there's no way to speed this up.
    earliest_restore_date = _get_earliest_restore_date(db)
    while datetime.utcnow() <= earliest_restore_date:
        sleep(10)  # seconds

    return db


class SqlServerDbRestoreScenarioTest(ScenarioTest):
    @ResourceGroupPreparer()
    @SqlServerPreparer()
    def test_sql_db_restore(self, resource_group, resource_group_location, server):
        rg = resource_group
        database_name = 'cliautomationdb01'

        # Standalone db
        restore_service_objective = 'S1'
        restore_edition = 'Standard'
        restore_standalone_database_name = 'cliautomationdb01restore1'

        restore_pool_database_name = 'cliautomationdb01restore2'
        elastic_pool = 'cliautomationpool1'

        # create elastic pool
        self.cmd('sql elastic-pool create -g {} -s {} -n {}'
                 .format(rg, server, elastic_pool))

        # Create database and wait for first backup to exist
        _create_db_wait_for_first_backup(self, rg, server, database_name)

        # Restore to standalone db
        self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
                 ' --service-objective {} --edition {}'
                 .format(rg, server, database_name, datetime.utcnow().isoformat(),
                         restore_standalone_database_name, restore_service_objective,
                         restore_edition),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', restore_standalone_database_name),
                     JMESPathCheck('requestedServiceObjectiveName', restore_service_objective),
                     JMESPathCheck('status', 'Online')])

        # Restore to db into pool
        self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
                 ' --elastic-pool {}'
                 .format(rg, server, database_name, datetime.utcnow().isoformat(),
                         restore_pool_database_name, elastic_pool),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', restore_pool_database_name),
                     JMESPathCheck('elasticPoolName', elastic_pool),
                     JMESPathCheck('status', 'Online')])


class SqlServerDbRestoreDeletedScenarioTest(ScenarioTest):
    @ResourceGroupPreparer()
    @SqlServerPreparer()
    def test_sql_db_restore_deleted(self, resource_group, resource_group_location, server):
        rg = resource_group
        database_name = 'cliautomationdb01'

        # Standalone db
        restore_service_objective = 'S1'
        restore_edition = 'Standard'
        restore_database_name1 = 'cliautomationdb01restore1'
        restore_database_name2 = 'cliautomationdb01restore2'

        # Create database and wait for first backup to exist
        _create_db_wait_for_first_backup(self, rg, server, database_name)

        # Delete database
        self.cmd('sql db delete -g {} -s {} -n {} --yes'.format(rg, server, database_name))

        # Wait for deleted database to become visible. When run live, this will take around
        # 5-10 minutes. Unfortunately there's no way to speed this up. Use timeout to ensure
        # test doesn't loop forever if there's a bug.
        start_time = datetime.now()
        timeout = timedelta(0, 15 * 60)  # 15 minutes timeout

        while True:
            deleted_dbs = list(
                self.cmd('sql db list-deleted -g {} -s {}'.format(rg, server)).get_output_in_json())

            if deleted_dbs:
                # Deleted db found, stop polling
                break

            # Deleted db not found, sleep (if running live) and then poll again.
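            # (The 15-minute ceiling guards against a product bug where the deleted db never
            # surfaces; in playback the recorded response is returned immediately, so the
            # sleep and timeout check below are effectively live-only.)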
            if self.is_live:
                self.assertTrue(datetime.now() < start_time + timeout,
                                'Deleted db not found before timeout expired.')
                sleep(10)  # seconds

        deleted_db = deleted_dbs[0]

        # Restore deleted to latest point in time
        self.cmd('sql db restore -g {} -s {} -n {} --deleted-time {} --dest-name {}'
                 ' --service-objective {} --edition {}'
                 .format(rg, server, database_name, _get_deleted_date(deleted_db).isoformat(),
                         restore_database_name1, restore_service_objective,
                         restore_edition),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', restore_database_name1),
                     JMESPathCheck('requestedServiceObjectiveName', restore_service_objective),
                     JMESPathCheck('status', 'Online')])

        # Restore deleted to earlier point in time
        self.cmd('sql db restore -g {} -s {} -n {} -t {} --deleted-time {} --dest-name {}'
                 .format(rg, server, database_name,
                         _get_earliest_restore_date(deleted_db).isoformat(),
                         _get_deleted_date(deleted_db).isoformat(),
                         restore_database_name2),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', restore_database_name2),
                     JMESPathCheck('status', 'Online')])


class SqlServerDbSecurityScenarioTest(ScenarioTest):
    def _get_storage_endpoint
(self, storage_account, resource_group): return self.cmd('storage account show -g {} -n {}' ' --query primaryEndpoints.blob' .format(resource_group, storage_account)).get_output_in_json() def _get_storage_key(self, storage_account, resource_group): return self.cmd('storage account keys list -g {} -n {} --query [0].value' .format(resource_group, storage_account)).get_output_in_json() @ResourceGroupPreparer() @ResourceGroupPreparer(parameter_name='resource_group_2') @SqlServerPreparer() @StorageAccountPreparer() @StorageAccountPreparer(parameter_name='storage_account_2', resource_group_parameter_name='resource_group_2') def test_sql_db_security_mgmt(self, resource_group, resource_group_2, resource_group_location, server, storage_account, storage_account_2): database_name = "cliautomationdb01" # get storage account endpoint and key storage_endpoint = self._get_storage_endpoint(storage_account, resource_group) key = self._get_storage_key(storage_account, resource_group) # create db self.cmd('sql db create -g {} -s {} -n {}' .format(resource_group, server, database_name), checks=[ JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('name', database_name), JMESPathCheck('status', 'Online')]) # get audit policy self.cmd('sql db audit-policy show -g {} -s {} -n {}' .format(resource_group, server, database_name), checks=[JMESPathCheck('resourceGroup', resource_group)]) # update audit policy - enable state_enabled = 'Enabled' retention_days = 30 audit_actions_input = 'DATABASE_LOGOUT_GROUP DATABASE_ROLE_MEMBER_CHANGE_GROUP' audit_actions_expected = ['DATABASE_LOGOUT_GROUP', 'DATABASE_ROLE_MEMBER_CHANGE_GROUP'] self.cmd('sql db audit-policy update -g {} -s {} -n {}' ' --state {} --storage-key {} --storage-endpoint={}' ' --retention-days={} --actions {}' .format(resource_group, server, database_name, state_enabled, key, storage_endpoint, retention_days, audit_actions_input), checks=[ JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('state', state_enabled), JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it JMESPathCheck('storageEndpoint', storage_endpoint), JMESPathCheck('retentionDays', retention_days), JMESPathCheck('auditActionsAndGroups', audit_actions_expected)]) # update audit policy - specify storage account and resource group. 
        # use secondary key
        storage_endpoint_2 = self._get_storage_endpoint(storage_account_2, resource_group_2)
        self.cmd('sql db audit-policy update -g {} -s {} -n {} --storage-account {}'
                 .format(resource_group, server, database_name, storage_account_2),
                 checks=[
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('state', state_enabled),
                     JMESPathCheck('storageAccountAccessKey', ''),  # service doesn't return it
                     JMESPathCheck('storageEndpoint', storage_endpoint_2),
                     JMESPathCheck('retentionDays', retention_days),
                     JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])

        # update audit policy - disable
        state_disabled = 'Disabled'
        self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {}'
                 .format(resource_group, server, database_name, state_disabled),
                 checks=[
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('state', state_disabled),
                     JMESPathCheck('storageAccountAccessKey', ''),  # service doesn't return it
                     JMESPathCheck('storageEndpoint', storage_endpoint_2),
                     JMESPathCheck('retentionDays', retention_days),
                     JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])

        # get threat detection policy
        self.cmd('sql db threat-policy show -g {} -s {} -n {}'
                 .format(resource_group, server, database_name),
                 checks=[JMESPathCheck('resourceGroup', resource_group)])

        # update threat detection policy - enable
        disabled_alerts_input = 'Sql_Injection_Vulnerability Access_Anomaly'
        disabled_alerts_expected = 'Sql_Injection_Vulnerability;Access_Anomaly'
        email_addresses_input = '[email protected] [email protected]'
        email_addresses_expected = '[email protected];[email protected]'
        email_account_admins = 'Enabled'

        self.cmd('sql db threat-policy update -g {} -s {} -n {}'
                 ' --state {} --storage-key {} --storage-endpoint {}'
                 ' --retention-days {} --email-addresses {} --disabled-alerts {}'
                 ' --email-account-admins {}'
                 .format(resource_group, server, database_name,
                         state_enabled, key, storage_endpoint,
                         retention_days, email_addresses_input, disabled_alerts_input,
                         email_account_admins),
                 checks=[
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('state', state_enabled),
                     JMESPathCheck('storageAccountAccessKey', key),
                     JMESPathCheck('storageEndpoint', storage_endpoint),
                     JMESPathCheck('retentionDays', retention_days),
                     JMESPathCheck('emailAddresses', email_addresses_expected),
                     JMESPathCheck('disabledAlerts', disabled_alerts_expected),
                     JMESPathCheck('emailAccountAdmins', email_account_admins)])

        # update threat policy - specify storage account and resource group. use secondary key
        key_2 = self._get_storage_key(storage_account_2, resource_group_2)
        self.cmd('sql db threat-policy update -g {} -s {} -n {} --storage-account {}'
                 .format(resource_group, server, database_name, storage_account_2),
                 checks=[
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('state', state_enabled),
                     JMESPathCheck('storageAccountAccessKey', key_2),
                     JMESPathCheck('storageEndpoint', storage_endpoint_2),
                     JMESPathCheck('retentionDays', retention_days),
                     JMESPathCheck('emailAddresses', email_addresses_expected),
                     JMESPathCheck('disabledAlerts', disabled_alerts_expected),
                     JMESPathCheck('emailAccountAdmins', email_account_admins)])

        # update threat policy - disable
        self.cmd('sql db threat-policy update -g {} -s {} -n {} --state {}'
                 .format(resource_group, server, database_name, state_disabled),
                 checks=[
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('state', state_disabled)])


class SqlServerDwMgmtScenarioTest(ScenarioTest):
    # pylint: disable=too-many-instance-attributes
    @ResourceGroupPreparer()
    @SqlServerPreparer()
    def test_sql_dw_mgmt(self, resource_group, resource_group_location, server):
        database_name = "cliautomationdb01"

        update_service_objective = 'DW200'
        update_storage = '20TB'
        update_storage_bytes = str(20 * 1024 * 1024 * 1024 * 1024)

        rg = resource_group
        loc_display = 'West US'

        # test sql db commands
        dw = self.cmd('sql dw create -g {} --server {} --name {}'
                      .format(rg, server, database_name),
                      checks=[
                          JMESPathCheck('resourceGroup', rg),
                          JMESPathCheck('name', database_name),
                          JMESPathCheck('location', loc_display),
                          JMESPathCheck('edition', 'DataWarehouse'),
                          JMESPathCheck('status', 'Online')]).get_output_in_json()

        # Sanity check that the default max size is not equal to the size that we will update to
        # later. That way we know that update is actually updating the size.
        self.assertNotEqual(dw['maxSizeBytes'], update_storage_bytes,
                            'Initial max size in bytes is equal to the value we want to update to later,'
                            ' so we will not be able to verify that update max size is actually updating.')

        # DataWarehouse is a little quirky and is considered to be both a database and its
        # separate own type of thing. (Why? Because it has the same REST endpoint as regular
        # database, so it must be a database. However it has only a subset of supported operations,
        # so to clarify which operations are supported by dw we group them under `sql dw`.) So the
        # dw shows up under both `db list` and `dw list`.
self.cmd('sql db list -g {} --server {}' .format(rg, server), checks=[ JMESPathCheck('length(@)', 2), # includes dw and master JMESPathCheck('sort([].name)', sorted([database_name, 'master'])), JMESPathCheck('[0].resourceGroup', rg), JMESPathCheck('[1].resourceGroup', rg)]) self.cmd('sql dw list -g {} --server {}' .format(rg, server), checks=[ JMESPathCheck('length(@)', 1), JMESPathCheck('[0].name', database_name), JMESPathCheck('[0].resourceGroup', rg)]) self.cmd('sql db show -g {} --server {} --name {}' .format(rg, server, database_name), checks=[ JMESPathCheck('name', database_name), JMESPathCheck('resourceGroup', rg)]) # pause/resume self.cmd('sql dw pause -g {} --server {} --name {}' .format(rg, server, database_name), checks=[NoneCheck()]) self.cmd('sql dw show --id {}' .format(dw['id']), checks=[ JMESPathCheck('name', database_name), JMESPathCheck('resourceGroup', rg), JMESPathCheck('status', 'Paused')]) self.cmd('sql dw resume -g {} --server {} --name {}' .format(rg, server, database_name), checks=[NoneCheck()]) self.cmd('sql dw show -g {} --server {} --name {}' .format(rg, server, database_name), checks=[ JMESPathCheck('name', database_name), JMESPathCheck('resourceGroup', rg), JMESPathCheck('status', 'Online')]) # Update DW storage self.cmd('sql dw update -g {} -s {} -n {} --max-size {}' ' --set tags.key1=value1' .format(rg, server, database_name, update_storage), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('maxSizeBytes', update_storage_bytes), JMESPathCheck('tags.key1', 'value1')]) # Update DW service objective self.cmd('sql dw update --id {} --service-objective {}' .format(dw['id'], update_service_objective), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('requestedServiceObjectiveName', update_service_objective), JMESPathCheck('maxSizeBytes', update_storage_bytes), JMESPathCheck('tags.key1', 'value1')]) # Delete DW self.cmd('sql dw delete -g {} --server {} --name {} --yes' .format(rg, server, database_name), checks=[NoneCheck()]) self.cmd('sql dw delete --id {} --yes' .format(dw['id']), checks=[NoneCheck()]) class SqlServerDnsAliasMgmtScenarioTest(ScenarioTest): # create 2 servers in the same resource group, and 1 server in a different resource group @ResourceGroupPreparer(parameter_name="resource_group_1", parameter_name_for_location="resource_group_location_1") @ResourceGroupPreparer(parameter_name="resource_group_2", parameter_name_for_location="resource_group_location_2") @SqlServerPreparer(parameter_name="server_name_1", resource_group_parameter_name="resource_group_1") @SqlServerPreparer(parameter_name="server_name_2", resource_group_parameter_name="resource_group_1") @SqlServerPreparer(parameter_name="server_name_3", resource_group_parameter_name="resource_group_2") def test_sql_server_dns_alias_mgmt(self, resource_group_1, resource_group_location_1, resource_group_2, resource_group_location_2, server_name_1, server_name_2, server_name_3): # helper class so that it's clear which servers are in which groups class ServerInfo(object): # pylint: disable=too-few-public-methods def __init__(self, name, group, location): self.name = name self.group = group self.location = location s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1) s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1) s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2) alias_name = 'alias1' # verify setup for s in (s1, s2, s3): 
            self.cmd('sql server show -g {} -n {}'
                     .format(s.group, s.name),
                     checks=[
                         JMESPathCheck('name', s.name),
                         JMESPathCheck('resourceGroup', s.group)])

        # Create server dns alias
        self.cmd('sql server dns-alias create -n {} -s {} -g {}'
                 .format(alias_name, s1.name, s1.group),
                 checks=[
                     JMESPathCheck('name', alias_name),
                     JMESPathCheck('resourceGroup', s1.group)
                 ])

        # Check that the alias was created on the right server
        self.cmd('sql server dns-alias list -s {} -g {}'
                 .format(s1.name, s1.group),
                 checks=[
                     JMESPathCheck('length(@)', 1),
                     JMESPathCheck('[0].name', alias_name)
                 ])

        # Repoint alias to the server within the same resource group
        self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
                 .format(alias_name, s1.name, s2.name, s2.group),
                 checks=[NoneCheck()])

        # List the aliases on old server to check if alias is not pointing there
        self.cmd('sql server dns-alias list -s {} -g {}'
                 .format(s1.name, s1.group),
                 checks=[
                     JMESPathCheck('length(@)', 0)
                 ])

        # Check if alias is pointing to new server
        self.cmd('sql server dns-alias list -s {} -g {}'
                 .format(s2.name, s2.group),
                 checks=[
                     JMESPathCheck('length(@)', 1),
                     JMESPathCheck('[0].name', alias_name)
                 ])

        # Repoint alias to the same server (to check that operation is idempotent)
        self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
                 .format(alias_name, s1.name, s2.name, s2.group),
                 checks=[NoneCheck()])

        # Check if alias is pointing to the right server
        self.cmd('sql server dns-alias list -s {} -g {}'
                 .format(s2.name, s2.group),
                 checks=[
                     JMESPathCheck('length(@)', 1),
                     JMESPathCheck('[0].name', alias_name)
                 ])

        # Repoint alias to a server in the other resource group
        self.cmd('sql server dns-alias set -n {} --original-server {} --original-resource-group {} -s {} -g {}'
                 .format(alias_name, s2.name, s2.group, s3.name, s3.group),
                 checks=[NoneCheck()])

        # List the aliases on old server to check if alias is not pointing there
        self.cmd('sql server dns-alias list -s {} -g {}'
                 .format(s2.name, s2.group),
                 checks=[
                     JMESPathCheck('length(@)', 0)
                 ])

        # Check if alias is pointing to new server
        self.cmd('sql server dns-alias list -s {} -g {}'
                 .format(s3.name, s3.group),
                 checks=[
                     JMESPathCheck('length(@)', 1),
                     JMESPathCheck('[0].name', alias_name)
                 ])

        # Drop alias
        self.cmd('sql server dns-alias delete -n {} -s {} -g {}'
                 .format(alias_name, s3.name, s3.group),
                 checks=[NoneCheck()])

        # Verify that alias got dropped correctly
        self.cmd('sql server dns-alias list -s {} -g {}'
                 .format(s3.name, s3.group),
                 checks=[
                     JMESPathCheck('length(@)', 0)
                 ])


class SqlServerDbReplicaMgmtScenarioTest(ScenarioTest):
    # create 2 servers in the same resource group, and 1 server in a different resource group
    @ResourceGroupPreparer(parameter_name="resource_group_1",
                           parameter_name_for_location="resource_group_location_1")
    @ResourceGroupPreparer(parameter_name="resource_group_2",
                           parameter_name_for_location="resource_group_location_2")
    @SqlServerPreparer(parameter_name="server_name_1",
                       resource_group_parameter_name="resource_group_1")
    @SqlServerPreparer(parameter_name="server_name_2",
                       resource_group_parameter_name="resource_group_1")
    @SqlServerPreparer(parameter_name="server_name_3",
                       resource_group_parameter_name="resource_group_2")
    def test_sql_db_replica_mgmt(self,
                                 resource_group_1, resource_group_location_1,
                                 resource_group_2, resource_group_location_2,
                                 server_name_1, server_name_2, server_name_3):
        database_name = "cliautomationdb01"
        service_objective = 'S1'

        # helper class so that it's clear which servers are in which groups
        class ServerInfo(object):  # pylint: disable=too-few-public-methods
            def __init__(self, name, group, location):
                self.name = name
                self.group = group
                self.location = location

        s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
        s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
        s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)

        # verify setup
        for s in (s1, s2, s3):
            self.cmd('sql server show -g {} -n {}'
                     .format(s.group, s.name),
                     checks=[
                         JMESPathCheck('name', s.name),
                         JMESPathCheck('resourceGroup', s.group)])

        # create db in first server
        self.cmd('sql db create -g {} -s {} -n {}'
                 .format(s1.group, s1.name, database_name),
                 checks=[
                     JMESPathCheck('name', database_name),
                     JMESPathCheck('resourceGroup', s1.group)])

        # create replica in second server with min params
        # partner resource group unspecified because s1.group == s2.group
        self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
                 .format(s1.group, s1.name, database_name, s2.name),
                 checks=[
                     JMESPathCheck('name', database_name),
                     JMESPathCheck('resourceGroup', s2.group)])

        # check that the replica was created in the correct server
        self.cmd('sql db show -g {} -s {} -n {}'
                 .format(s2.group, s2.name, database_name),
                 checks=[
                     JMESPathCheck('name', database_name),
                     JMESPathCheck('resourceGroup', s2.group)])

        # create replica in third server with max params
        # --elastic-pool is untested
        self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
                 ' --partner-resource-group {} --service-objective {}'
                 .format(s1.group, s1.name, database_name, s3.name,
                         s3.group, service_objective),
                 checks=[
                     JMESPathCheck('name', database_name),
                     JMESPathCheck('resourceGroup', s3.group),
                     JMESPathCheck('requestedServiceObjectiveName', service_objective)])

        # check that the replica was created in the correct server
        self.cmd('sql db show -g {} -s {} -n {}'
                 .format(s3.group, s3.name, database_name),
                 checks=[
                     JMESPathCheck('name', database_name),
                     JMESPathCheck('resourceGroup', s3.group)])

        # list replica links on s1 - it should link to s2 and s3
        self.cmd('sql db replica list-links -g {} -s {} -n {}'
                 .format(s1.group, s1.name, database_name),
                 checks=[JMESPathCheck('length(@)', 2)])

        # list replica links on s3 - it should link only to s1
        self.cmd('sql db replica list-links -g {} -s {} -n {}'
                 .format(s3.group, s3.name, database_name),
                 checks=[
                     JMESPathCheck('length(@)', 1),
                     JMESPathCheck('[0].role', 'Secondary'),
                     JMESPathCheck('[0].partnerRole', 'Primary')])

        # Failover to s3.
        self.cmd('sql db replica set-primary -g {} -s {} -n {}'
                 .format(s3.group, s3.name, database_name),
                 checks=[NoneCheck()])

        # list replica links on s3 - it should link to s1 and s2
        self.cmd('sql db replica list-links -g {} -s {} -n {}'
                 .format(s3.group, s3.name, database_name),
                 checks=[JMESPathCheck('length(@)', 2)])

        # Stop replication from s3 to s2 twice. Second time should be no-op.
        for _ in range(2):
            # Delete link
            self.cmd('sql db replica delete-link -g {} -s {} -n {} --partner-resource-group {}'
                     ' --partner-server {} --yes'
                     .format(s3.group, s3.name, database_name, s2.group, s2.name),
                     checks=[NoneCheck()])

            # Verify link was deleted. s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}' .format(s3.group, s3.name, database_name), checks=[ JMESPathCheck('length(@)', 1), JMESPathCheck('[0].role', 'Primary'), JMESPathCheck('[0].partnerRole', 'Secondary')]) # Failover to s3 again (should be no-op, it's already primary) self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss' .format(s3.group, s3.name, database_name), checks=[NoneCheck()]) # s3 should still be the primary. self.cmd('sql db replica list-links -g {} -s {} -n {}' .format(s3.group, s3.name, database_name), checks=[ JMESPathCheck('length(@)', 1), JMESPathCheck('[0].role', 'Primary'), JMESPathCheck('[0].partnerRole', 'Secondary')]) # Force failover back to s1 self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss' .format(s1.group, s1.name, database_name), checks=[NoneCheck()]) class SqlElasticPoolsMgmtScenarioTest(ScenarioTest): def __init__(self, method_name): super(SqlElasticPoolsMgmtScenarioTest, self).__init__(method_name) self.pool_name = "cliautomationpool01" def verify_activities(self, activities, resource_group, server): if isinstance(activities, list.__class__): raise AssertionError("Actual value '{}' expected to be list class." .format(activities)) for activity in activities: if isinstance(activity, dict.__class__): raise AssertionError("Actual value '{}' expected to be dict class" .format(activities)) if activity['resourceGroup'] != resource_group: raise AssertionError("Actual value '{}' != Expected value {}" .format(activity['resourceGroup'], resource_group)) elif activity['serverName'] != server: raise AssertionError("Actual value '{}' != Expected value {}" .format(activity['serverName'], server)) elif activity['currentElasticPoolName'] != self.pool_name: raise AssertionError("Actual value '{}' != Expected value {}" .format(activity['currentElasticPoolName'], self.pool_name)) return True @ResourceGroupPreparer(location='eastus2') @SqlServerPreparer(location='eastus2') def test_sql_elastic_pools_mgmt(self, resource_group, resource_group_location, server): database_name = "cliautomationdb02" pool_name2 = "cliautomationpool02" edition = 'Standard' dtu = 1200 db_dtu_min = 10 db_dtu_max = 50 storage = '1200GB' storage_mb = 1228800 updated_dtu = 50 updated_db_dtu_min = 10 updated_db_dtu_max = 50 updated_storage = '50GB' updated_storage_mb = 51200 db_service_objective = 'S1' rg = resource_group loc_display = 'East US 2' # test sql elastic-pool commands elastic_pool_1 = self.cmd('sql elastic-pool create -g {} --server {} --name {} ' '--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} ' '--storage {}' .format(rg, server, self.pool_name, dtu, edition, db_dtu_min, db_dtu_max, storage), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', self.pool_name), JMESPathCheck('location', loc_display), JMESPathCheck('state', 'Ready'), JMESPathCheck('dtu', dtu), JMESPathCheck('databaseDtuMin', db_dtu_min), JMESPathCheck('databaseDtuMax', db_dtu_max), JMESPathCheck('edition', edition), JMESPathCheck('storageMb', storage_mb)]).get_output_in_json() self.cmd('sql elastic-pool show -g {} --server {} --name {}' .format(rg, server, self.pool_name), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', self.pool_name), JMESPathCheck('state', 'Ready'), JMESPathCheck('databaseDtuMin', db_dtu_min), JMESPathCheck('databaseDtuMax', db_dtu_max), JMESPathCheck('edition', edition), JMESPathCheck('storageMb', storage_mb), JMESPathCheck('zoneRedundant', False)]) self.cmd('sql elastic-pool show --id {}' 
.format(elastic_pool_1['id']), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', self.pool_name), JMESPathCheck('state', 'Ready'), JMESPathCheck('databaseDtuMin', db_dtu_min), JMESPathCheck('databaseDtuMax', db_dtu_max), JMESPathCheck('edition', edition), JMESPathCheck('storageMb', storage_mb)]) self.cmd('sql elastic-pool list -g {} --server {}' .format(rg, server), checks=[ JMESPathCheck('[0].resourceGroup', rg), JMESPathCheck('[0].name', self.pool_name), JMESPathCheck('[0].state', 'Ready'), JMESPathCheck('[0].databaseDtuMin', db_dtu_min), JMESPathCheck('[0].databaseDtuMax', db_dtu_max), JMESPathCheck('[0].edition', edition), JMESPathCheck('[0].storageMb', storage_mb)]) self.cmd('sql elastic-pool update -g {} --server {} --name {} ' '--dtu {} --storage {} --set tags.key1=value1' .format(rg, server, self.pool_name, updated_dtu, updated_storage), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', self.pool_name), JMESPathCheck('state', 'Ready'), JMESPathCheck('dtu', updated_dtu), JMESPathCheck('edition', edition), JMESPathCheck('databaseDtuMin', db_dtu_min), JMESPathCheck('databaseDtuMax', db_dtu_max), JMESPathCheck('storageMb', updated_storage_mb), JMESPathCheck('tags.key1', 'value1')]) self.cmd('sql elastic-pool update --id {} ' '--dtu {} --db-dtu-min {} --db-dtu-max {} --storage {}' .format(elastic_pool_1['id'], dtu, updated_db_dtu_min, updated_db_dtu_max, storage), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', self.pool_name), JMESPathCheck('state', 'Ready'), JMESPathCheck('dtu', dtu), JMESPathCheck('databaseDtuMin', updated_db_dtu_min), JMESPathCheck('databaseDtuMax', updated_db_dtu_max), JMESPathCheck('storageMb', storage_mb), JMESPathCheck('tags.key1', 'value1')]) self.cmd('sql elastic-pool update -g {} --server {} --name {} ' '--remove tags.key1' .format(rg, server, self.pool_name), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', self.pool_name), JMESPathCheck('state', 'Ready'), JMESPathCheck('tags', {})]) # create a second pool with minimal params self.cmd('sql elastic-pool create -g {} --server {} --name {} ' .format(rg, server, pool_name2), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', pool_name2), JMESPathCheck('location', loc_display), JMESPathCheck('state', 'Ready')]) self.cmd('sql elastic-pool list -g {} -s {}'.format(rg, server), checks=[JMESPathCheck('length(@)', 2)]) # Create a database directly in an Azure sql elastic pool self.cmd('sql db create -g {} --server {} --name {} ' '--elastic-pool {}' .format(rg, server, database_name, self.pool_name), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('elasticPoolName', self.pool_name), JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'), JMESPathCheck('status', 'Online')]) # Move database to second pool. 
Specify service objective just for fun self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}' ' --service-objective ElasticPool' .format(rg, server, database_name, pool_name2), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('elasticPoolName', pool_name2), JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'), JMESPathCheck('status', 'Online')]) # Remove database from pool self.cmd('sql db update -g {} -s {} -n {} --service-objective {}' .format(rg, server, database_name, db_service_objective), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('elasticPoolName', None), JMESPathCheck('requestedServiceObjectiveName', db_service_objective), JMESPathCheck('status', 'Online')]) # Move database back into pool self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}' ' --service-objective ElasticPool' .format(rg, server, database_name, self.pool_name), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('elasticPoolName', self.pool_name), JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'), JMESPathCheck('status', 'Online')]) # List databases in a pool self.cmd('sql elastic-pool list-dbs -g {} -s {} -n {}' .format(rg, server, self.pool_name), checks=[ JMESPathCheck('length(@)', 1), JMESPathCheck('[0].resourceGroup', rg), JMESPathCheck('[0].name', database_name), JMESPathCheck('[0].elasticPoolName', self.pool_name)]) # List databases in a pool - alternative command self.cmd('sql db list -g {} -s {} --elastic-pool {}' .format(rg, server, self.pool_name), checks=[ JMESPathCheck('length(@)', 1), JMESPathCheck('[0].resourceGroup', rg), JMESPathCheck('[0].name', database_name), JMESPathCheck('[0].elasticPoolName', self.pool_name)]) # self.cmd('sql elastic-pool db show-activity -g {} --server {} --elastic-pool {}' # .format(rg, server, pool_name), # checks=[ # JMESPathCheck('length(@)', 1), # JMESPathCheck('[0].resourceGroup', rg), # JMESPathCheck('[0].serverName', server), # JMESPathCheck('[0].currentElasticPoolName', pool_name)]) # activities = self.cmd('sql elastic-pools db show-activity -g {} ' # '--server-name {} --elastic-pool-name {}' # .format(rg, server, pool_name), # checks=[JMESPathCheck('type(@)', 'array')]) # self.verify_activities(activities, resource_group) # delete sql server database self.cmd('sql db delete -g {} --server {} --name {} --yes' .format(rg, server, database_name), checks=[NoneCheck()]) # delete sql elastic pool self.cmd('sql elastic-pool delete -g {} --server {} --name {}' .format(rg, server, self.pool_name), checks=[NoneCheck()]) # delete sql elastic pool by id self.cmd('sql elastic-pool delete --id {}' .format(elastic_pool_1['id']), checks=[NoneCheck()]) class SqlElasticPoolOperationMgmtScenarioTest(ScenarioTest): def __init__(self, method_name): super(SqlElasticPoolOperationMgmtScenarioTest, self).__init__(method_name) self.pool_name = "operationtestep1" @ResourceGroupPreparer(location='southeastasia') @SqlServerPreparer(location='southeastasia') def test_sql_elastic_pool_operation_mgmt(self, resource_group, resource_group_location, server): edition = 'Premium' dtu = 125 db_dtu_min = 0 db_dtu_max = 50 storage = '50GB' storage_mb = 51200 update_dtu = 250 update_db_dtu_min = 50 update_db_dtu_max = 250 # Create elastic pool self.cmd('sql elastic-pool create -g {} --server {} --name {} ' '--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} --storage {}' .format(resource_group, server, self.pool_name, dtu, 
edition, db_dtu_min, db_dtu_max, storage), checks=[ JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('name', self.pool_name), JMESPathCheck('edition', edition), JMESPathCheck('state', 'Ready'), JMESPathCheck('dtu', dtu), JMESPathCheck('databaseDtuMin', db_dtu_min), JMESPathCheck('databaseDtuMax', db_dtu_max), JMESPathCheck('storageMb', storage_mb)]) # Update elastic pool self.cmd('sql elastic-pool update -g {} --server {} --name {} ' '--dtu {} --db-dtu-min {} --db-dtu-max {}' .format(resource_group, server, self.pool_name, update_dtu, update_db_dtu_min, update_db_dtu_max)) # List operations on the elastic pool ops = list(self.cmd('sql elastic-pool op list -g {} --server {} --elastic-pool {}' .format(resource_group, server, self.pool_name)).get_output_in_json()) # Cancel operation try: self.cmd('sql elastic-pool op cancel -g {} --server {} --elastic-pool {} --name {}' .format(resource_group, server, self.pool_name, ops[0]['name'])) except Exception as e: expectedmessage = "Cannot cancel management operation {} in current state.".format(ops[0]['name']) if expectedmessage in str(e): pass class SqlServerCapabilityScenarioTest(ScenarioTest): @AllowLargeResponse() def test_sql_capabilities(self): location = 'westus' # New capabilities are added quite frequently and the state of each capability depends # on your subscription. So it's not a good idea to make strict checks against exactly # which capabilities are returned. The idea is to just check the overall structure. db_max_size_length_jmespath = 'length([].supportedServiceLevelObjectives[].supportedMaxSizes[])' # Get all db capabilities self.cmd('sql db list-editions -l {}'.format(location), checks=[ # At least standard and premium edition exist JMESPathCheckExists("[?name == 'Standard']"), JMESPathCheckExists("[?name == 'Premium']"), # At least s0 and p1 service objectives exist JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"), JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'P1']"), # Max size data is omitted JMESPathCheck(db_max_size_length_jmespath, 0)]) # Get all db capabilities with size data self.cmd('sql db list-editions -l {} --show-details max-size'.format(location), checks=[ # Max size data is included JMESPathCheckGreaterThan(db_max_size_length_jmespath, 0)]) # Search for db edition - note that it's case insensitive self.cmd('sql db list-editions -l {} --edition standard'.format(location), checks=[ # Standard edition exists, other editions don't JMESPathCheckExists("[?name == 'Standard']"), JMESPathCheck("length([?name != 'Standard'])", 0)]) # Search for db service objective - note that it's case insensitive # Checked items: # * Standard edition exists, other editions don't # * S0 service objective exists, others don't exist self.cmd('sql db list-editions -l {} --edition standard --service-objective s0'.format(location), checks=[JMESPathCheckExists("[?name == 'Standard']"), JMESPathCheck("length([?name != 'Standard'])", 0), JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"), JMESPathCheck("length([].supportedServiceLevelObjectives[] | [?name != 'S0'])", 0)]) pool_max_size_length_jmespath = 'length([].supportedElasticPoolDtus[].supportedMaxSizes[])' pool_db_max_dtu_length_jmespath = 'length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxDtus[])' pool_db_min_dtu_length_jmespath = ('length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxDtus[]' '.supportedPerDatabaseMinDtus[])') pool_db_max_size_length_jmespath = 
'length([].supportedElasticPoolDtus[].supportedPerDatabaseMaxSizes[])' # Get all elastic pool capabilities self.cmd('sql elastic-pool list-editions -l {}'.format(location), checks=[JMESPathCheckExists("[?name == 'Standard']"), # At least standard and premium edition exist JMESPathCheckExists("[?name == 'Premium']"), JMESPathCheck(pool_max_size_length_jmespath, 0), # Optional details are omitted JMESPathCheck(pool_db_max_dtu_length_jmespath, 0), JMESPathCheck(pool_db_min_dtu_length_jmespath, 0), JMESPathCheck(pool_db_max_size_length_jmespath, 0)]) # Search for elastic pool edition - note that it's case insensitive self.cmd('sql elastic-pool list-editions -l {} --edition standard'.format(location), checks=[JMESPathCheckExists("[?name == 'Standard']"), # Standard edition exists, other editions don't JMESPathCheck("length([?name != 'Standard'])", 0)]) # Search for dtu limit self.cmd('sql elastic-pool list-editions -l {} --dtu 100'.format(location), checks=[ # All results have 100 dtu JMESPathCheckGreaterThan('length([].supportedElasticPoolDtus[?limit == `100`][])', 0), JMESPathCheck('length([].supportedElasticPoolDtus[?limit != `100`][])', 0)]) # Get all db capabilities with pool max size self.cmd('sql elastic-pool list-editions -l {} --show-details max-size'.format(location), checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0), JMESPathCheck(pool_db_max_dtu_length_jmespath, 0), JMESPathCheck(pool_db_min_dtu_length_jmespath, 0), JMESPathCheck(pool_db_max_size_length_jmespath, 0)]) # Get all db capabilities with per db max size self.cmd('sql elastic-pool list-editions -l {} --show-details db-max-size'.format(location), checks=[JMESPathCheck(pool_max_size_length_jmespath, 0), JMESPathCheck(pool_db_max_dtu_length_jmespath, 0), JMESPathCheck(pool_db_min_dtu_length_jmespath, 0), JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)]) # Get all db capabilities with per db max dtu self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-max-dtu'.format(location), checks=[JMESPathCheck(pool_max_size_length_jmespath, 0), JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0), JMESPathCheck(pool_db_min_dtu_length_jmespath, 0), JMESPathCheck(pool_db_max_size_length_jmespath, 0)]) # Get all db capabilities with per db min dtu (which is nested under per db max dtu) self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu'.format(location), checks=[JMESPathCheck(pool_max_size_length_jmespath, 0), JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0), JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0), JMESPathCheck(pool_db_max_size_length_jmespath, 0)]) # Get all db capabilities with everything self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu db-max-dtu ' 'db-max-size max-size'.format(location), checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0), JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0), JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0), JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)]) class SqlServerImportExportMgmtScenarioTest(ScenarioTest): @ResourceGroupPreparer() @SqlServerPreparer() @StorageAccountPreparer() def test_sql_db_import_export_mgmt(self, resource_group, resource_group_location, server, storage_account): location_long_name = 'West US' admin_login = 'admin123' admin_password = 'SecretPassword123' db_name = 'cliautomationdb01' db_name2 = 'cliautomationdb02' db_name3 = 
'cliautomationdb03' blob = 'testbacpac.bacpac' blob2 = 'testbacpac2.bacpac' container = 'bacpacs' firewall_rule_1 = 'allowAllIps' start_ip_address_1 = '0.0.0.0' end_ip_address_1 = '0.0.0.0' # create server firewall rule self.cmd('sql server firewall-rule create --name {} -g {} --server {} ' '--start-ip-address {} --end-ip-address {}' .format(firewall_rule_1, resource_group, server, start_ip_address_1, end_ip_address_1), checks=[JMESPathCheck('name', firewall_rule_1), JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('startIpAddress', start_ip_address_1), JMESPathCheck('endIpAddress', end_ip_address_1)]) # create dbs self.cmd('sql db create -g {} --server {} --name {}' .format(resource_group, server, db_name), checks=[JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('name', db_name), JMESPathCheck('location', location_long_name), JMESPathCheck('elasticPoolName', None), JMESPathCheck('status', 'Online')]) self.cmd('sql db create -g {} --server {} --name {}' .format(resource_group, server, db_name2), checks=[JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('name', db_name2), JMESPathCheck('location', location_long_name), JMESPathCheck('elasticPoolName', None), JMESPathCheck('status', 'Online')]) self.cmd('sql db create -g {} --server {} --name {}' .format(resource_group, server, db_name3), checks=[JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('name', db_name3), JMESPathCheck('location', location_long_name), JMESPathCheck('elasticPoolName', None), JMESPathCheck('status', 'Online')]) # get storage account endpoint storage_endpoint = self.cmd('storage account show -g {} -n {}' ' --query primaryEndpoints.blob' .format(resource_group, storage_account)).get_output_in_json() bacpacUri = '{}{}/{}'.format(storage_endpoint, container, blob) bacpacUri2 = '{}{}/{}'.format(storage_endpoint, container, blob2) # get storage account key storageKey = self.cmd('storage account keys list -g {} -n {} --query [0].value' .format(resource_group, storage_account)).get_output_in_json() # Set Expiry expiryString = '9999-12-25T00:00:00Z' # Get sas key sasKey = self.cmd('storage blob generate-sas --account-name {} -c {} -n {} --permissions rw --expiry {}'.format( storage_account, container, blob2, expiryString)).get_output_in_json() # create storage account blob container self.cmd('storage container create -n {} --account-name {} --account-key {} ' .format(container, storage_account, storageKey), checks=[JMESPathCheck('created', True)]) # export database to blob container using both keys self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}' ' --storage-key {} --storage-key-type StorageAccessKey' ' --storage-uri {}' .format(server, db_name, resource_group, admin_password, admin_login, storageKey, bacpacUri), checks=[JMESPathCheck('blobUri', bacpacUri), JMESPathCheck('databaseName', db_name), JMESPathCheck('requestType', 'Export'), JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('serverName', server), JMESPathCheck('status', 'Completed')]) self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}' ' --storage-key {} --storage-key-type SharedAccessKey' ' --storage-uri {}' .format(server, db_name, resource_group, admin_password, admin_login, sasKey, bacpacUri2), checks=[JMESPathCheck('blobUri', bacpacUri2), JMESPathCheck('databaseName', db_name), JMESPathCheck('requestType', 'Export'), JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('serverName', server), JMESPathCheck('status', 'Completed')]) # import bacpac to second database using Storage Key 
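        # (--storage-key-type tells the service how to interpret --storage-key:
        # 'StorageAccessKey' for the account key fetched above, 'SharedAccessKey'
        # for the SAS token generated above.)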
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}' ' --storage-key {} --storage-key-type StorageAccessKey' ' --storage-uri {}' .format(server, db_name2, resource_group, admin_password, admin_login, storageKey, bacpacUri), checks=[JMESPathCheck('blobUri', bacpacUri), JMESPathCheck('databaseName', db_name2), JMESPathCheck('name', 'import'), JMESPathCheck('requestType', 'Import'), JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('serverName', server), JMESPathCheck('status', 'Completed')]) # import bacpac to third database using SAS key self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}' ' --storage-key {} --storage-key-type SharedAccessKey' ' --storage-uri {}' .format(server, db_name3, resource_group, admin_password, admin_login, sasKey, bacpacUri2), checks=[JMESPathCheck('blobUri', bacpacUri2), JMESPathCheck('databaseName', db_name3), JMESPathCheck('name', 'import'), JMESPathCheck('requestType', 'Import'), JMESPathCheck('resourceGroup', resource_group), JMESPathCheck('serverName', server), JMESPathCheck('status', 'Completed')]) class SqlServerConnectionStringScenarioTest(ScenarioTest): def test_sql_db_conn_str(self): # ADO.NET, username/password conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net').get_output_in_json() self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;') # ADO.NET, ADPassword conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net -a ADPassword').get_output_in_json() self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Password"') # ADO.NET, ADIntegrated conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net -a ADIntegrated').get_output_in_json() self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Integrated"') # SqlCmd, username/password conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd').get_output_in_json() self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -N -l 30') # SqlCmd, ADPassword conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADPassword').get_output_in_json() self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -G -N -l 30') # SqlCmd, ADIntegrated conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADIntegrated').get_output_in_json() self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -G -N -l 30') # JDBC, user name/password conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc').get_output_in_json() self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>@myserver;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30') # JDBC, ADPassword conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc -a ADPassword').get_output_in_json() self.assertEqual(conn_str, 
'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryPassword') # JDBC, ADIntegrated conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc -a ADIntegrated').get_output_in_json() self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryIntegrated') # PHP PDO, user name/password conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo').get_output_in_json() self.assertEqual(conn_str, '$conn = new PDO("sqlsrv:server = tcp:myserver.database.windows.net,1433; Database = mydb; LoginTimeout = 30; Encrypt = 1; TrustServerCertificate = 0;", "<username>", "<password>");') # PHP PDO, ADPassword self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADPassword', expect_failure=True) # PHP PDO, ADIntegrated self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADIntegrated', expect_failure=True) # PHP, user name/password conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php').get_output_in_json() self.assertEqual(conn_str, '$connectionOptions = array("UID"=>"<username>@myserver", "PWD"=>"<password>", "Database"=>mydb, "LoginTimeout" => 30, "Encrypt" => 1, "TrustServerCertificate" => 0); $serverName = "tcp:myserver.database.windows.net,1433"; $conn = sqlsrv_connect($serverName, $connectionOptions);') # PHP, ADPassword self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADPassword', expect_failure=True) # PHP, ADIntegrated self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADIntegrated', expect_failure=True) # ODBC, user name/password conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc').get_output_in_json() self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;') # ODBC, ADPassword conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc -a ADPassword').get_output_in_json() self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryPassword') # ODBC, ADIntegrated conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc -a ADIntegrated').get_output_in_json() self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryIntegrated') class SqlTransparentDataEncryptionScenarioTest(ScenarioTest): def wait_for_encryption_scan(self, rg, sn, db_name): active_scan = True retry_attempts = 5 while active_scan: tdeactivity = self.cmd('sql db tde list-activity -g {} -s {} -d {}' .format(rg, sn, db_name)).get_output_in_json() # if tdeactivity is an empty array, there is no ongoing encryption scan active_scan = (len(tdeactivity) > 0) time.sleep(10) retry_attempts -= 1 if retry_attempts <= 0: raise CliTestError("Encryption scan still ongoing: {}.".format(tdeactivity)) @ResourceGroupPreparer() 
@SqlServerPreparer() def test_sql_tde(self, resource_group, server): rg = resource_group sn = server db_name = self.create_random_name("sqltdedb", 20) # create database self.cmd('sql db create -g {} --server {} --name {}' .format(rg, sn, db_name)) # validate encryption is on by default self.cmd('sql db tde show -g {} -s {} -d {}' .format(rg, sn, db_name), checks=[JMESPathCheck('status', 'Enabled')]) self.wait_for_encryption_scan(rg, sn, db_name) # disable encryption self.cmd('sql db tde set -g {} -s {} -d {} --status Disabled' .format(rg, sn, db_name), checks=[JMESPathCheck('status', 'Disabled')]) self.wait_for_encryption_scan(rg, sn, db_name) # validate encryption is disabled self.cmd('sql db tde show -g {} -s {} -d {}' .format(rg, sn, db_name), checks=[JMESPathCheck('status', 'Disabled')]) # enable encryption self.cmd('sql db tde set -g {} -s {} -d {} --status Enabled' .format(rg, sn, db_name), checks=[JMESPathCheck('status', 'Enabled')]) self.wait_for_encryption_scan(rg, sn, db_name) # validate encryption is enabled self.cmd('sql db tde show -g {} -s {} -d {}' .format(rg, sn, db_name), checks=[JMESPathCheck('status', 'Enabled')]) @ResourceGroupPreparer() @SqlServerPreparer() def test_sql_tdebyok(self, resource_group, server): resource_prefix = 'sqltdebyok' # add identity to server server_resp = self.cmd('sql server update -g {} -n {} -i' .format(resource_group, server)).get_output_in_json() server_identity = server_resp['identity']['principalId'] # create db db_name = self.create_random_name(resource_prefix, 20) self.cmd('sql db create -g {} --server {} --name {}' .format(resource_group, server, db_name)) # create vault and acl server identity vault_name = self.create_random_name(resource_prefix, 24) self.cmd('keyvault create -g {} -n {} --enable-soft-delete true' .format(resource_group, vault_name)) self.cmd('keyvault set-policy -g {} -n {} --object-id {} --key-permissions wrapKey unwrapKey get list' .format(resource_group, vault_name, server_identity)) # create key key_name = self.create_random_name(resource_prefix, 32) key_resp = self.cmd('keyvault key create -n {} -p software --vault-name {}' .format(key_name, vault_name)).get_output_in_json() kid = key_resp['key']['kid'] # add server key server_key_resp = self.cmd('sql server key create -g {} -s {} -k {}' .format(resource_group, server, kid), checks=[ JMESPathCheck('uri', kid), JMESPathCheck('serverKeyType', 'AzureKeyVault')]) server_key_name = server_key_resp.get_output_in_json()['name'] # validate show key self.cmd('sql server key show -g {} -s {} -k {}' .format(resource_group, server, kid), checks=[ JMESPathCheck('uri', kid), JMESPathCheck('serverKeyType', 'AzureKeyVault'), JMESPathCheck('name', server_key_name)]) # validate list key (should return 2 items) self.cmd('sql server key list -g {} -s {}' .format(resource_group, server), checks=[JMESPathCheck('length(@)', 2)]) # validate encryption protector is service managed via show self.cmd('sql server tde-key show -g {} -s {}' .format(resource_group, server), checks=[ JMESPathCheck('serverKeyType', 'ServiceManaged'), JMESPathCheck('serverKeyName', 'ServiceManaged')]) # update encryption protector to akv key self.cmd('sql server tde-key set -g {} -s {} -t AzureKeyVault -k {}' .format(resource_group, server, kid), checks=[ JMESPathCheck('serverKeyType', 'AzureKeyVault'), JMESPathCheck('serverKeyName', server_key_name), JMESPathCheck('uri', kid)]) # validate encryption protector is akv via show self.cmd('sql server tde-key show -g {} -s {}' .format(resource_group, server), checks=[ 
JMESPathCheck('serverKeyType', 'AzureKeyVault'), JMESPathCheck('serverKeyName', server_key_name), JMESPathCheck('uri', kid)]) # update encryption protector to service managed self.cmd('sql server tde-key set -g {} -s {} -t ServiceManaged' .format(resource_group, server), checks=[ JMESPathCheck('serverKeyType', 'ServiceManaged'), JMESPathCheck('serverKeyName', 'ServiceManaged')]) # validate encryption protector is service managed via show self.cmd('sql server tde-key show -g {} -s {}' .format(resource_group, server), checks=[ JMESPathCheck('serverKeyType', 'ServiceManaged'), JMESPathCheck('serverKeyName', 'ServiceManaged')]) # delete server key self.cmd('sql server key delete -g {} -s {} -k {}' .format(resource_group, server, kid)) # wait for key to be deleted time.sleep(10) # validate deleted server key via list (should return 1 item) self.cmd('sql server key list -g {} -s {}' .format(resource_group, server), checks=[JMESPathCheck('length(@)', 1)]) class SqlServerVnetMgmtScenarioTest(ScenarioTest): @ResourceGroupPreparer() @SqlServerPreparer() def test_sql_vnet_mgmt(self, resource_group, resource_group_location, server): rg = resource_group vnet_rule_1 = 'rule1' vnet_rule_2 = 'rule2' # Create vnet's - vnet1 and vnet2 vnetName1 = 'vnet1' vnetName2 = 'vnet2' subnetName = 'subnet1' addressPrefix = '10.0.1.0/24' endpoint = 'Microsoft.Sql' # Vnet 1 without service endpoints to test ignore-missing-vnet-service-endpoint feature self.cmd('network vnet create -g {} -n {}'.format(rg, vnetName1)) self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {}' .format(rg, vnetName1, subnetName, addressPrefix)) vnet1 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}' .format(subnetName, vnetName1, rg)).get_output_in_json() vnet_id_1 = vnet1['id'] # Vnet 2 self.cmd('network vnet create -g {} -n {}'.format(rg, vnetName2)) self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {} --service-endpoints {}' .format(rg, vnetName2, subnetName, addressPrefix, endpoint), checks=JMESPathCheck('serviceEndpoints[0].service', 'Microsoft.Sql')) vnet2 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}' .format(subnetName, vnetName2, rg)).get_output_in_json() vnet_id_2 = vnet2['id'] # test sql server vnet-rule create using subnet name and vnet name and ignore-missing-vnet-service-endpoint flag self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {} --vnet-name {} -i' .format(vnet_rule_1, rg, server, subnetName, vnetName1)) # test sql server vnet-rule show rule 1 self.cmd('sql server vnet-rule show --name {} -g {} --server {}' .format(vnet_rule_1, rg, server), checks=[ JMESPathCheck('name', vnet_rule_1), JMESPathCheck('resourceGroup', rg), JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)]) # test sql server vnet-rule create using subnet id self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {}' .format(vnet_rule_2, rg, server, vnet_id_2), checks=[ JMESPathCheck('name', vnet_rule_2), JMESPathCheck('resourceGroup', rg), JMESPathCheck('virtualNetworkSubnetId', vnet_id_2), JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)]) # test sql server vnet-rule update rule 1 with vnet 2 self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {}' .format(vnet_rule_1, rg, server, vnet_id_2), checks=[ JMESPathCheck('name', vnet_rule_1), JMESPathCheck('resourceGroup', rg), JMESPathCheck('virtualNetworkSubnetId', vnet_id_2), JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)]) # test 
sql server vnet-rule update rule 2 with vnet 1 and ignore-missing-vnet-service-endpoint flag self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {} -i' .format(vnet_rule_2, rg, server, vnet_id_1), checks=[JMESPathCheck('name', vnet_rule_2), JMESPathCheck('resourceGroup', rg), JMESPathCheck('virtualNetworkSubnetId', vnet_id_1), JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)]) # test sql server vnet-rule list self.cmd('sql server vnet-rule list -g {} --server {}'.format(rg, server), checks=[JMESPathCheck('length(@)', 2)]) # test sql server vnet-rule delete rule 1 self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_1, rg, server), checks=NoneCheck()) # test sql server vnet-rule delete rule 2 self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_2, rg, server), checks=NoneCheck()) class SqlSubscriptionUsagesScenarioTest(ScenarioTest): def test_sql_subscription_usages(self): self.cmd('sql list-usages -l westus', checks=[JMESPathCheckGreaterThan('length(@)', 2)]) self.cmd('sql show-usage -l westus -u SubscriptionFreeDatabaseDaysLeft', checks=[ JMESPathCheck('name', 'SubscriptionFreeDatabaseDaysLeft'), JMESPathCheckGreaterThan('limit', 0)]) class SqlZoneResilienceScenarioTest(ScenarioTest): @ResourceGroupPreparer(location='eastus2') @SqlServerPreparer(location='eastus2') def test_sql_zone_resilient_database(self, resource_group, resource_group_location, server): database_name = "createUnzonedUpdateToZonedDb" database_name_2 = "createZonedUpdateToUnzonedDb" database_name_3 = "updateNoParamForUnzonedDb" database_name_4 = "updateNoParamForZonedDb" rg = resource_group loc_display = "East US 2" # Test creating database with zone resilience set to false. Expect regular database created. self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant {}' .format(rg, server, database_name, "Premium", False), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('location', loc_display), JMESPathCheck('elasticPoolName', None), JMESPathCheck('edition', 'Premium'), JMESPathCheck('zoneRedundant', False)]) # Test running update on regular database with zone resilience set to true. Expect zone resilience to update to true. self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --zone-redundant' .format(rg, server, database_name, 'P1'), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name), JMESPathCheck('elasticPoolName', None), JMESPathCheck('status', 'Online'), JMESPathCheck('requestedServiceObjectiveName', 'P1'), JMESPathCheck('zoneRedundant', True)]) # Test creating database with zone resilience set to true. Expect zone resilient database created. self.cmd('sql db create -g {} --server {} --name {} --edition {} --z' .format(rg, server, database_name_2, "Premium"), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name_2), JMESPathCheck('location', loc_display), JMESPathCheck('elasticPoolName', None), JMESPathCheck('edition', 'Premium'), JMESPathCheck('zoneRedundant', True)]) # Test running update on zoned database with zone resilience set to false. 
Expect zone resilience to update to false self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --z {}' .format(rg, server, database_name_2, 'P1', False), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name_2), JMESPathCheck('elasticPoolName', None), JMESPathCheck('status', 'Online'), JMESPathCheck('requestedServiceObjectiveName', 'P1'), JMESPathCheck('zoneRedundant', False)]) # Create database with no zone resilience set. Expect regular database created. self.cmd('sql db create -g {} --server {} --name {} --edition {}' .format(rg, server, database_name_3, "Premium"), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name_3), JMESPathCheck('location', loc_display), JMESPathCheck('elasticPoolName', None), JMESPathCheck('edition', 'Premium'), JMESPathCheck('zoneRedundant', False)]) # Test running update on regular database with no zone resilience set. Expect zone resilience to stay false. self.cmd('sql db update -g {} -s {} -n {} --service-objective {}' .format(rg, server, database_name_3, 'P2'), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name_3), JMESPathCheck('elasticPoolName', None), JMESPathCheck('status', 'Online'), JMESPathCheck('requestedServiceObjectiveName', 'P2'), JMESPathCheck('zoneRedundant', False)]) # Create database with zone resilience set. Expect zone resilient database created. self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant' .format(rg, server, database_name_4, "Premium"), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name_4), JMESPathCheck('location', loc_display), JMESPathCheck('elasticPoolName', None), JMESPathCheck('edition', 'Premium'), JMESPathCheck('zoneRedundant', True)]) # Test running update on zoned database with no zone resilience set. Expect zone resilience to stay true. self.cmd('sql db update -g {} -s {} -n {} --service-objective {}' .format(rg, server, database_name_4, 'P2'), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', database_name_4), JMESPathCheck('elasticPoolName', None), JMESPathCheck('status', 'Online'), JMESPathCheck('requestedServiceObjectiveName', 'P2'), JMESPathCheck('zoneRedundant', True)]) @ResourceGroupPreparer(location='eastus2') @SqlServerPreparer(location='eastus2') def test_sql_zone_resilient_pool(self, resource_group, resource_group_location, server): pool_name = "createUnzonedUpdateToZonedPool" pool_name_2 = "createZonedUpdateToUnzonedPool" pool_name_3 = "updateNoParamForUnzonedPool" pool_name_4 = "updateNoParamForZonedPool" rg = resource_group # Test creating pool with zone resilience set to false. Expect regular pool created. self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --z {}' .format(rg, server, pool_name, "Premium", False)) self.cmd('sql elastic-pool show -g {} --server {} --name {}' .format(rg, server, pool_name), checks=[ JMESPathCheck('resourceGroup', rg), JMESPathCheck('name', pool_name), JMESPathCheck('state', 'Ready'), JMESPathCheck('edition', 'Premium'), JMESPathCheck('zoneRedundant', False)]) # Test running update on regular pool with zone resilience set to true. 
Expect zone resilience to update to true
        self.cmd('sql elastic-pool update -g {} -s {} -n {} --z'
                 .format(rg, server, pool_name))

        self.cmd('sql elastic-pool show -g {} --server {} --name {}'
                 .format(rg, server, pool_name),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', pool_name),
                     JMESPathCheck('zoneRedundant', True)])

        # Test creating pool with zone resilience set to true. Expect zone resilient pool created.
        self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
                 .format(rg, server, pool_name_2, "Premium"))

        self.cmd('sql elastic-pool show -g {} --server {} --name {}'
                 .format(rg, server, pool_name_2),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', pool_name_2),
                     JMESPathCheck('state', 'Ready'),
                     JMESPathCheck('edition', 'Premium'),
                     JMESPathCheck('zoneRedundant', True)])

        # Test running update on zoned pool with zone resilience set to false. Expect zone resilience to update to false
        self.cmd('sql elastic-pool update -g {} -s {} -n {} --zone-redundant {}'
                 .format(rg, server, pool_name_2, False))

        self.cmd('sql elastic-pool show -g {} --server {} --name {}'
                 .format(rg, server, pool_name_2),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', pool_name_2),
                     JMESPathCheck('zoneRedundant', False)])

        # Create pool with no zone resilience set. Expect regular pool created.
        self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {}'
                 .format(rg, server, pool_name_3, "Premium"))

        self.cmd('sql elastic-pool show -g {} --server {} --name {}'
                 .format(rg, server, pool_name_3),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', pool_name_3),
                     JMESPathCheck('state', 'Ready'),
                     JMESPathCheck('edition', 'Premium'),
                     JMESPathCheck('zoneRedundant', False)])

        # Test running update on regular pool with no zone resilience set. Expect zone resilience to stay false
        self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
                 .format(rg, server, pool_name_3, 250))

        self.cmd('sql elastic-pool show -g {} --server {} --name {}'
                 .format(rg, server, pool_name_3),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', pool_name_3),
                     JMESPathCheck('dtu', 250),
                     JMESPathCheck('zoneRedundant', False)])

        # Create pool with zone resilience set. Expect zone resilient pool created.
        self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
                 .format(rg, server, pool_name_4, "Premium"))

        self.cmd('sql elastic-pool show -g {} --server {} --name {}'
                 .format(rg, server, pool_name_4),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', pool_name_4),
                     JMESPathCheck('state', 'Ready'),
                     JMESPathCheck('edition', 'Premium'),
                     JMESPathCheck('zoneRedundant', True)])

        # Test running update on zoned pool with no zone resilience set. Expect zone resilience to stay true
        self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
                 .format(rg, server, pool_name_4, 250))

        self.cmd('sql elastic-pool show -g {} --server {} --name {}'
                 .format(rg, server, pool_name_4),
                 checks=[
                     JMESPathCheck('resourceGroup', rg),
                     JMESPathCheck('name', pool_name_4),
                     JMESPathCheck('dtu', 250),
                     JMESPathCheck('zoneRedundant', True)])
_get_storage_endpoint
CustomerContract.workbench.js
import React, { Component } from 'react' import { connect } from 'dva' import moment from 'moment' import BooleanOption from '../../components/BooleanOption'; import BaseTool from '../../common/Base.tool' import { Tag, Button, Row, Col, Icon, Card, Tabs, Table, Radio, DatePicker, Tooltip, Menu, Dropdown,Badge, Switch,Select,Form,AutoComplete,Modal } from 'antd' import { Link, Route, Redirect} from 'dva/router' import numeral from 'numeral' import {TagCloud} from '../../components/Charts' import Trend from '../../components/Trend' import NumberInfo from '../../components/NumberInfo' import { getTimeDistance } from '../../utils/utils' import PageHeaderLayout from '../../layouts/PageHeaderLayout' import styles from './CustomerContract.workbench.less' import DescriptionList from '../../components/DescriptionList'; import ImagePreview from '../../components/ImagePreview'; import GlobalComponents from '../../custcomponents'; import DashboardTool from '../../common/Dashboard.tool' import appLocaleName from '../../common/Locale.tool' const {aggregateDataset,calcKey, defaultHideCloseTrans, defaultImageListOf,defaultSettingListOf,defaultBuildTransferModal, defaultExecuteTrans,defaultHandleTransferSearch,defaultShowTransferModel, defaultRenderExtraHeader, defaultSubListsOf,defaultRenderAnalytics, defaultRenderExtraFooter,renderForTimeLine,renderForNumbers, defaultQuickFunctions, defaultRenderSubjectList, }= DashboardTool const {defaultFormatNumber} = BaseTool const formatNumber = defaultFormatNumber const { Description } = DescriptionList; const { TabPane } = Tabs const { RangePicker } = DatePicker const { Option } = Select const imageList =(customerContract)=>{return [ ]} const internalImageListOf = (customerContract) =>defaultImageListOf(customerContract,imageList) const optionList =(customerContract)=>{return [ ]} const buildTransferModal = defaultBuildTransferModal const showTransferModel = defaultShowTransferModel
const internalLargeTextOf = (customerContract) =>{ return null } const internalRenderExtraHeader = defaultRenderExtraHeader const internalRenderExtraFooter = defaultRenderExtraFooter const internalSubListsOf = defaultSubListsOf const renderSettingDropDown = (cardsData,targetComponent)=>{ return (<div style={{float: 'right'}} > <Dropdown overlay={renderSettingMenu(cardsData,targetComponent)} placement="bottomRight" > <Button> <Icon type="setting" theme="filled" twoToneColor="#00b" style={{color:'#3333b0'}}/> 设置 <Icon type="down"/> </Button> </Dropdown></div>) } const renderSettingMenuItem = (item,cardsData,targetComponent) =>{ const userContext = null return (<Menu.Item key={item.name}> <Link to={`/customerContract/${targetComponent.props.customerContract.id}/list/${item.name}/${item.displayName}/`}> <span>{item.displayName}</span> </Link> </Menu.Item> ) } const renderSettingMenu = (cardsData,targetComponent) =>{ const userContext = null return (<Menu> <Menu.Item key="profile"> <Link to={`/customerContract/${targetComponent.props.customerContract.id}/permission`}><Icon type="safety-certificate" theme="twoTone" twoToneColor="#52c41a"/><span>{appLocaleName(userContext,"Permission")}</span></Link> </Menu.Item> <Menu.Divider /> {cardsData.subSettingItems.map(item=>renderSettingMenuItem(item,cardsData,targetComponent))} </Menu>) } const internalRenderTitle = (cardsData,targetComponent) =>{ const linkComp=cardsData.returnURL?<Link to={cardsData.returnURL}> <Icon type="double-left" style={{marginRight:"10px"}} /> </Link>:null return (<div>{linkComp}{cardsData.cardsName}: {cardsData.displayName} {renderSettingDropDown(cardsData,targetComponent)}</div>) } const internalSummaryOf = (cardsData,targetComponent) =>{ const quickFunctions = targetComponent.props.quickFunctions || internalQuickFunctions const customerContract = cardsData.cardsSource const {CustomerContractService} = GlobalComponents const userContext = null return ( <div> <DescriptionList className={styles.headerList} size="small" col="4"> <Description term="ID" style={{wordBreak: 'break-all'}}>{customerContract.id}</Description> <Description term="名称" style={{wordBreak: 'break-all'}}>{customerContract.name}</Description> <Description term="接触时间">{ moment(customerContract.contactTime).format('YYYY-MM-DD HH:mm')}</Description> <Description term="客户">{customerContract.customer==null?appLocaleName(userContext,"NotAssigned"):`${customerContract.customer.displayName}(${customerContract.customer.id})`} </Description> </DescriptionList> </div> ) } const renderTagCloud=(cardsData)=>{ if(cardsData.subItems.length<10){ return null } const tagValue = cardsData.subItems.map(item=>({name:item.displayName, value: item.count})) return <div > <div style={{verticalAlign:"middle",textAlign:"center",backgroundColor:"rgba(0, 0, 0, 0.65)",color:"white",fontWeight:"bold",height:"40px"}}> <span style={{display:"inline-block",marginTop:"10px"}}>{`${cardsData.displayName}画像`}</span> </div> <TagCloud data={tagValue} height={200} style={{backgroundColor:"white"}}/> </div> } const internalQuickFunctions = defaultQuickFunctions class CustomerContractWorkbench extends Component { state = { transferModalVisiable: false, candidateReferenceList: {}, candidateServiceName:"", candidateObjectType:"city", targetLocalName:"", transferServiceName:"", currentValue:"", transferTargetParameterName:"", defaultType: 'customerContract' } componentDidMount() { } render() { // eslint-disable-next-line max-len const { id,displayName, } = this.props.customerContract 
if(!this.props.customerContract.class){ return null } const returnURL = this.props.returnURL const cardsData = {cardsName:window.trans('customer_contract'),cardsFor: "customerContract", cardsSource: this.props.customerContract,returnURL,displayName, subItems: [ ], subSettingItems: [ ], }; const renderExtraHeader = this.props.renderExtraHeader || internalRenderExtraHeader const settingListOf = this.props.settingListOf || internalSettingListOf const imageListOf = this.props.imageListOf || internalImageListOf const subListsOf = this.props.subListsOf || internalSubListsOf const largeTextOf = this.props.largeTextOf ||internalLargeTextOf const summaryOf = this.props.summaryOf || internalSummaryOf const renderTitle = this.props.renderTitle || internalRenderTitle const renderExtraFooter = this.props.renderExtraFooter || internalRenderExtraFooter const renderAnalytics = this.props.renderAnalytics || defaultRenderAnalytics const quickFunctions = this.props.quickFunctions || internalQuickFunctions const renderSubjectList = this.props.renderSubjectList || internalRenderSubjectList // {quickFunctions(cardsData)} return ( <PageHeaderLayout title={renderTitle(cardsData,this)} content={summaryOf(cardsData,this)} wrapperClassName={styles.advancedForm} > {renderExtraHeader(cardsData.cardsSource)} {imageListOf(cardsData.cardsSource,4)} {renderAnalytics(cardsData.cardsSource)} {settingListOf(cardsData.cardsSource)} {largeTextOf(cardsData.cardsSource)} </PageHeaderLayout> ) } } export default connect(state => ({ customerContract: state._customerContract, returnURL: state.breadcrumb.returnURL, }))(Form.create()(CustomerContractWorkbench))
const internalRenderSubjectList = defaultRenderSubjectList const internalSettingListOf = (customerContract) =>defaultSettingListOf(customerContract, optionList)
gen_assets.go
// Code generated by "esc -pkg mappings -o plugin/storage/es/mappings/gen_assets.go -ignore assets -prefix plugin/storage/es/mappings plugin/storage/es/mappings"; DO NOT EDIT. package mappings
"compress/gzip" "encoding/base64" "fmt" "io" "io/ioutil" "net/http" "os" "path" "sync" "time" ) type _escLocalFS struct{} var _escLocal _escLocalFS type _escStaticFS struct{} var _escStatic _escStaticFS type _escDirectory struct { fs http.FileSystem name string } type _escFile struct { compressed string size int64 modtime int64 local string isDir bool once sync.Once data []byte name string } func (_escLocalFS) Open(name string) (http.File, error) { f, present := _escData[path.Clean(name)] if !present { return nil, os.ErrNotExist } return os.Open(f.local) } func (_escStaticFS) prepare(name string) (*_escFile, error) { f, present := _escData[path.Clean(name)] if !present { return nil, os.ErrNotExist } var err error f.once.Do(func() { f.name = path.Base(name) if f.size == 0 { return } var gr *gzip.Reader b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed)) gr, err = gzip.NewReader(b64) if err != nil { return } f.data, err = ioutil.ReadAll(gr) }) if err != nil { return nil, err } return f, nil } func (fs _escStaticFS) Open(name string) (http.File, error) { f, err := fs.prepare(name) if err != nil { return nil, err } return f.File() } func (dir _escDirectory) Open(name string) (http.File, error) { return dir.fs.Open(dir.name + name) } func (f *_escFile) File() (http.File, error) { type httpFile struct { *bytes.Reader *_escFile } return &httpFile{ Reader: bytes.NewReader(f.data), _escFile: f, }, nil } func (f *_escFile) Close() error { return nil } func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) { if !f.isDir { return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name) } fis, ok := _escDirs[f.local] if !ok { return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local) } limit := count if count <= 0 || limit > len(fis) { limit = len(fis) } if len(fis) == 0 && count > 0 { return nil, io.EOF } return fis[0:limit], nil } func (f *_escFile) Stat() (os.FileInfo, error) { return f, nil } func (f *_escFile) Name() string { return f.name } func (f *_escFile) Size() int64 { return f.size } func (f *_escFile) Mode() os.FileMode { return 0 } func (f *_escFile) ModTime() time.Time { return time.Unix(f.modtime, 0) } func (f *_escFile) IsDir() bool { return f.isDir } func (f *_escFile) Sys() interface{} { return f } // FS returns a http.Filesystem for the embedded assets. If useLocal is true, // the filesystem's contents are instead used. func FS(useLocal bool) http.FileSystem { if useLocal { return _escLocal } return _escStatic } // Dir returns a http.Filesystem for the embedded assets on a given prefix dir. // If useLocal is true, the filesystem's contents are instead used. func Dir(useLocal bool, name string) http.FileSystem { if useLocal { return _escDirectory{fs: _escLocal, name: name} } return _escDirectory{fs: _escStatic, name: name} } // FSByte returns the named file from the embedded assets. If useLocal is // true, the filesystem's contents are instead used. func FSByte(useLocal bool, name string) ([]byte, error) { if useLocal { f, err := _escLocal.Open(name) if err != nil { return nil, err } b, err := ioutil.ReadAll(f) _ = f.Close() return b, err } f, err := _escStatic.prepare(name) if err != nil { return nil, err } return f.data, nil } // FSMustByte is the same as FSByte, but panics if name is not present. 
func FSMustByte(useLocal bool, name string) []byte { b, err := FSByte(useLocal, name) if err != nil { panic(err) } return b } // FSString is the string version of FSByte. func FSString(useLocal bool, name string) (string, error) { b, err := FSByte(useLocal, name) return string(b), err } // FSMustString is the string version of FSMustByte. func FSMustString(useLocal bool, name string) string { return string(FSMustByte(useLocal, name)) } var _escData = map[string]*_escFile{ "/.nocover": { name: ".nocover", local: "plugin/storage/es/mappings/.nocover", size: 43, modtime: 1563791427, compressed: ` H4sIAAAAAAAC/youSSzJzFYoSEzOTkxPVcjILy4pVkgsLcnXTU/NSy1KLElNUUjLzEkt1uMCBAAA//8y IKK1KwAAAA== `, }, "/jaeger-service-7.json": { name: "jaeger-service-7.json", local: "plugin/storage/es/mappings/jaeger-service-7.json", size: 878, modtime: 1566472262, compressed: ` H4sIAAAAAAAC/8ySwW7aQBCG736K1agnBFZViR72RluqVmppBcopikaDPdibeNeb3YEEIb97ZGTAhHDL IReP5H++35/t3SVKgXE5P6MnEQ4uglYwuCcuOIwih43JeDSAYbsYWcS4IoJuuQOZurVdcsB6hbGkkLcN n3aIs5u/36Zz/PcTF78m8x8LxGb4NhbYVyajS3A+/f/n9/fJBWrJe+OK1HEUznFluMpjWhlrBPT489lu 4Mc1R4lpRlnJKTtaVgxawpoTpfa90PWdXizfOrImQ2HrKxKOoG/3iVK7brbfw5NDoSKiJX9gu6yrPL+r FMjWM2h44O1THXIYnqemcHVgpGW9YdBfxl97cdPfBU9SoiXJStAgVKQDOMZN8oroOftQZxzjh9DuXNJr +vt51/1NH2rPQQzHkxx0B3RGlvvK13Wvqh41oX0Miande7Qmh2uTNMlLAAAA//8YcMrbbgMAAA== `, }, "/jaeger-service.json": { name: "jaeger-service.json", local: "plugin/storage/es/mappings/jaeger-service.json", size: 1060, modtime: 1566229177, compressed: ` H4sIAAAAAAAC/8yTT2/UMBDF7/kU1ojTamshpHLwrUARSFDQVpwQGs3Gs1mD7RjbKayqfHfk4pKkW+2J Q3PIn/F78/xz7NtGCMjsgqXMoASsvhN3HM8SxxvT8tkK1kWSOGfjuwSqOIQA4zX/ln5wW47Y7zDtKeoE Sjy7Rbz68vHV5QY/vcXrdxebN9eI4/pxW+RgTUvHxs3l5w/vX18cWR2FYHwnPafMGneGrU7SGmcyqPPn C23knwOnnGRL7Z4le9paBpXjwEc9OUp98ORMC2pHNnEjxF0y1MQJHTXvaLAZ7yulRtZOn0LA3zA9NStX RRECahbeL30C9fWfeWpTVj6Qx0xdQkdhHnE3Wif3sF5+6iEwKPjBh1991LB+OG4630dG2vY3DOrF+cuF YFzqIVDeo6Pc7kFBpk6uYCYYm0d8C4oQ+5ZTemIgdVbyFFB9+9bMukE9HbMNEGIfOGbDabENqvCKHC/R TmGdQJrhQAmkbHr//7o382e5j83Y/AkAAP//qd2MzCQEAAA= `, }, "/jaeger-span-7.json": { name: "jaeger-span-7.json", local: "plugin/storage/es/mappings/jaeger-span-7.json", size: 3420, modtime: 1566472262, compressed: ` H4sIAAAAAAAC/+xWXW+UQBR951eQG5+aLTEm9YG3amtsYqtp65Mxk7twYaedL2fuVjcN/93A0hYKbE2k xhhflix3zuHcmXsO3EZxDNLk9EM4ZCZvAqQx7F0hleT3g0OzvweLuF4WiFmaMkBao+5wiVnrJXlhCxFW 6PMa/+JWiLPPp2+Oz8XHd+Li/eH50YUQ1WIc5skpmeEQeH786cPJ28MBVKNz0pSJocCUi0KSykOipJYM 6cHL3lpP39YUOCQZZitKyOBSEaTs1xTFccMLLd9DY/nGoJaZYNJOIVOA9EtTiePb9lrvh0MjGMsgNLo7 bFtrKft34xh44whSuKbNd+tzWPSrsjTWk8ClvSFIXx287pSr7lpwyCuhkbMVpMBYJntwX66iR4iOZudt RiH8FbJbLcmU/Ob6tT1N560jz5LCgzhgjxmdHHXlTkudlHkvERx6Mnzh0MxIGualq7cBWVpzhprmE8no +VKOMyprSpiGnEqlZBgD5sjU1VFYr5EhBXI2Wwm9BQ6Y8/W2w1/XUigsRxVIw3WQDRHKjgO2mdIV3YYB pAWqQIuelwYjuaWSmgKjdlM+6jYxNMk2z6awA4E7Re4U2hSvaTO8+5Tln7T9oKsGcYNqTX/saYzlZUP7 PM+Lpv5V00l8l3m9yZuco0D+Rmb02OizJjLjZNrb5RVlDLug/6f0n5xSTwV5Mhk9W0R6Ksa6nm+sh18G s/IPX+q/S7/jOB55dNyfveXPdm4jPpxtT0d9N2fQzT1xE5+s9W8VVdHPAAAA//+SuQbQXA0AAA== `, }, "/jaeger-span.json": { name: "jaeger-span.json", local: "plugin/storage/es/mappings/jaeger-span.json", size: 3830, modtime: 1566229177, compressed: ` H4sIAAAAAAAC/+xW0W/TPhB+z18RnX5PUxf9hDQe8jbYEJPYQNt4Qsi6JpfUm2Mb+zqopv7vKE1Lm9ZJ QGoQEvShbWx/391n333xcxTHwFRZhUyQxnDygFSSO/UW9ekJTOp5T8xSlx7Senkcg9Q5fUv0vJqSE6YQ foYu95DG/z0LcfPx+tXlrXj/Rty9Pb+9uBNiOQnDHFklMzwE3l5+eHf1+vwAWqG1UpeJJs+Ui0KSyn2i ZCUZ0rP/W2sdfZmTZ59kmM0oIY1TRZCym9MBJ7kkX2isZAZpgcpTFMeryLCOuJUucipwrlhsRuoxVGr7 
GMfQBMu3ZPVnLSWOYR1LbPbdQ/rpB3hLU++8RS0YSy8qtLshVrPr5PbH6xNdWIIUHmnx1bgcJvvzstTG kcCpeSJIX5y9bC1YtteDRZ6JCjmbQQqMZXICOwuWUQDXUmGdycj7P0zIOqukT9D63+doh211KDunb52x 5FiSb9UAO8zo6qKtqU9Pj5YdHWDRkeY7i3oEcj8Obb1ByNLoG6zo+EkzOr6XXczK6BL6gddSKenD8Lw2 xlZWhXEVMqRA1mQzUTXgYIR83uj+1cwKhWVHPlJzbdBhnDJdsMYy20LanjfZa9lAUTeEsiLPWNnudm0L C3ViY93dDIF0B1IeSHs1/UiL0Piwz/yE1wRUrlBPqOb026Mylvcr8jHjRn3Py6FXxMaA9+q1p/I8uSeZ 0aGJjPCqYOx5HZnpA2UMQwT/6vuvrm9HBTnSGY1uyY6K8G4cuylCd5oR4oSuIccIM3hkB13f1fF7oNFP ONjZR971jk4+vrGOU6u9N/jmt/5eRsvoewAAAP//W45CgfYOAAA= `, }, "/": { name: "/", local: `plugin/storage/es/mappings`, isDir: true, }, } var _escDirs = map[string][]os.FileInfo{ "plugin/storage/es/mappings": { _escData["/.nocover"], _escData["/jaeger-service-7.json"], _escData["/jaeger-service.json"], _escData["/jaeger-span-7.json"], _escData["/jaeger-span.json"], }, }
import ( "bytes"
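An aside on the generated file above: each embedded asset is stored as a base64-encoded gzip stream and is inflated once, on first access. For orientation, a minimal sketch of the same decode pipeline in Rust — assuming the `base64` (0.13) and `flate2` crates; this is an illustration, not part of the generated code:

use std::io::Read;

use flate2::read::GzDecoder;

// Decode one embedded asset: drop the line breaks the generator inserts for
// readability, base64-decode, then gunzip the result into a string.
fn decompress_asset(blob: &str) -> std::io::Result<String> {
    let compact: String = blob.chars().filter(|c| !c.is_whitespace()).collect();
    let bytes = base64::decode(compact)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
    let mut out = String::new();
    GzDecoder::new(&bytes[..]).read_to_string(&mut out)?;
    Ok(out)
}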
validators_clearers.js
// All the validators for the questions should be added to response_validators. E.g., if there is a validator
// for questions of type MCQ, then it should be added as response_validators['MCQ'] = function() { return true; };
var response_validators = {};

// response_clearer is a dictionary which contains the response clearer for each question type.
// Every question type being implemented can define its response clearer if anything
// special is required to clear the response in the form.
// Note: this function is triggered when the student presses the 'Clear Response' button
// on a particular question. A response clearer should be added as response_clearer['MCQ'] = function (qid) {
//     // Somehow clear the form for the question with the given qid.
// };
var gform;
var response_clearer = {};
/**
 * Add the response_clearer for the MCQ type question.
 **/
response_clearer['MCQ'] = function (qid) {
    var form = $('#form-question-' + qid);
    // Use .prop rather than .attr so checkboxes the student has toggled are actually unchecked.
    form.find('input').prop('checked', false);
    gform = form;
};

/**
 * Add the response_clearer for the TFQ type question.
 **/
response_clearer['TFQ'] = function (qid) {
    var form = $('#form-question-' + qid);
    form.find('input').prop('checked', false);
};

/** Currently there are no special validators or clearers. **/
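The file above uses a plain registry pattern: one dictionary per concern, keyed by question type, with handlers looked up and dispatched at runtime. A language-neutral sketch of the same idea in Rust (the type names and ids here are hypothetical):

use std::collections::HashMap;

fn main() {
    // Map each question type to its clearer, mirroring response_clearer above.
    let mut response_clearer: HashMap<&str, Box<dyn Fn(u32)>> = HashMap::new();
    response_clearer.insert("MCQ", Box::new(|qid| {
        println!("clearing #form-question-{}", qid);
    }));

    // Dispatch by question type, like response_clearer[question.type](question.id).
    if let Some(clear) = response_clearer.get("MCQ") {
        clear(42);
    }
}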
main.rs
extern crate clap;
extern crate trow;

use clap::{Arg, ArgMatches};
use trow::{NetAddr, TrowBuilder};
use std::fs::File;
use std::io::prelude::*;

const PROGRAM_NAME: &str = "Trow";
const PROGRAM_DESC: &str = "\nThe Cluster Registry";
const DEFAULT_CERT_PATH: &str = "./certs/domain.crt";
const DEFAULT_KEY_PATH: &str = "./certs/domain.key";

/*
Responsible for configuring and starting the Trow registry.
Parses command line arguments and returns ArgMatches object.
Will cause the program to exit if error or on help/version argument.
*/
fn parse_args<'a>() -> ArgMatches<'a> {
    clap::App::new(PROGRAM_NAME)
        .version("0.1")
        .author("From Container Solutions")
        .about(PROGRAM_DESC)
        .arg(
            Arg::with_name("host")
                .long("host")
                .value_name("host")
                .help("Sets the name of the host or interface to start Trow on. Defaults to 0.0.0.0")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("port")
                .long("port")
                .value_name("port")
                .help("The port that trow will listen on. Defaults to 8443 with TLS, 8000 without.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("no-tls")
                .long("no-tls")
                .help("Turns off TLS. Normally only used in development and debugging. If used in production, make sure you understand the risks.")
        )
        .arg(
            Arg::with_name("cert")
                .short("c")
                .long("cert")
                .value_name("cert")
                .help(&format!("Path to TLS certificate. Defaults to {}.", DEFAULT_CERT_PATH))
                .takes_value(true),
        )
        .arg(
            Arg::with_name("key")
                .short("k")
                .long("key")
                .value_name("key")
                .help(&format!("Path to TLS private key. Defaults to {}.", DEFAULT_KEY_PATH))
                .takes_value(true),
        )
        .arg(
            Arg::with_name("data-dir")
                .short("d")
                .long("data-dir")
                .value_name("data_dir")
                .help("Directory to store images and metadata in.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("names")
                .short("n")
                .long("names")
                .value_name("names")
                .help("Host names for registry. Used in validation callbacks. Separate with comma or use quotes and spaces")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("dry-run")
                .long("dry-run")
                .value_name("dry_run")
                .help("Don't actually run Trow, just validate arguments. For testing purposes.")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("allow-docker-official")
                .long("allow-docker-official")
                .value_name("allow_docker_official")
                .help("Docker official images (e.g. the debian base image) will be allowed in validation callbacks.")
                .takes_value(false)
        )
        .arg(
            Arg::with_name("deny-k8s-images")
                .long("deny-k8s-images")
                .value_name("deny_k8s_images")
                .help("By default, validation callbacks will allow various Kubernetes system images. This option will deny those images; be careful as this may disable cluster installation and updates.")
                .takes_value(false)
        )
        .arg(
            Arg::with_name("allow-prefixes")
                .long("allow-prefixes")
                .value_name("allow_prefixes")
                .help("Images that begin with any of the listed prefixes will be allowed in validation callbacks. Separate with a comma or use quotes and spaces. For example 'quay.io/coreos,myhost.com/' will match quay.io/coreos/etcd and myhost.com/myimage/myrepo:tag. Use docker.io as the hostname for the Docker Hub.")
                .takes_value(true)
        )
        .arg(
            Arg::with_name("allow-images")
                .long("allow-images")
                .value_name("allow_images")
                .help("Images that match a full name in the list will be allowed in validation callbacks. Separate with a comma or use quotes and spaces. Include the hostname. For example 'quay.io/coreos/etcd:latest'. Use docker.io as the hostname for the Docker Hub.")
                .takes_value(true)
        )
        .arg(
            Arg::with_name("disallow-local-prefixes")
                .long("disallow-local-prefixes")
                .value_name("disallow_local_prefixes")
                .help("Disallow local images that match the prefix _not_ including any host name. For example 'beta' will match myhost.com/beta/myapp assuming myhost.com is the name of this registry.")
                .takes_value(true)
        )
        .arg(
            Arg::with_name("disallow-local-images")
                .long("disallow-local-images")
                .value_name("disallow_local_images")
                .help("Disallow local images that match the full name _not_ including any host name. For example 'beta/myapp:tag' will match myhost.com/beta/myapp:tag assuming myhost.com is the name of this registry.")
                .takes_value(true)
        )
        .arg(
            Arg::with_name("user")
                .long("user")
                .short("u")
                .value_name("user")
                .help("Set the username that can be used to access Trow (e.g. via docker login). Must be used with --password or --password-file")
                .takes_value(true)
        )
        .arg(
            Arg::with_name("password")
                .long("password")
                .short("p")
                .value_name("password")
                .help("Set the password that can be used to access Trow (e.g. via docker login). Must be used with --user")
                .takes_value(true)
        )
        .arg(
            Arg::with_name("password-file")
                .long("password-file")
                .value_name("password-file")
                .help("Location of file with password that can be used to access Trow (e.g. via docker login). Must be used with --user")
                .takes_value(true)
        )
        .arg(
            Arg::with_name("version")
                .long("version")
                .short("v")
                .value_name("version")
                .help("Get the version number of Trow")
                .takes_value(false)
        )
        .get_matches()
}

fn
(names: &str) -> Vec<String> {
    // Split on ',' or whitespace.
    let ret_str = names.replace(",", " ");
    ret_str.split_whitespace().map(|x| x.to_owned()).collect()
}

fn main() {
    let matches = parse_args();

    if matches.is_present("version") {
        println!("Trow version {} Commit {}", env!("CARGO_PKG_VERSION"), env!("VCS_REF"));
        std::process::exit(0);
    }

    let no_tls = matches.is_present("no-tls");
    let host = matches.value_of("host").unwrap_or("0.0.0.0");
    let default_port = if no_tls { 8000 } else { 8443 };
    let port: u16 = matches
        .value_of("port")
        .map_or(default_port, |x| x.parse().unwrap());
    let cert_path = matches.value_of("cert").unwrap_or("./certs/domain.crt");
    let key_path = matches.value_of("key").unwrap_or("./certs/domain.key");
    let data_path = matches.value_of("data-dir").unwrap_or("./data");

    let host_names_str = matches.value_of("names").unwrap_or(host);
    let host_names = parse_list(&host_names_str);
    let dry_run = matches.is_present("dry-run");

    let mut allow_prefixes = parse_list(matches.value_of("allow-prefixes").unwrap_or(""));
    if matches.is_present("allow-docker-official") {
        allow_prefixes.push("docker.io/".to_owned());
    }
    if !matches.is_present("deny-k8s-images") {
        allow_prefixes.push("k8s.gcr.io/".to_owned());
        allow_prefixes.push("docker.io/containersol/trow".to_owned());
    }

    let allow_images = parse_list(matches.value_of("allow-images").unwrap_or(""));
    let deny_prefixes = parse_list(matches.value_of("disallow-local-prefixes").unwrap_or(""));
    let deny_images = parse_list(matches.value_of("disallow-local-images").unwrap_or(""));

    let addr = NetAddr {
        host: host.to_string(),
        port,
    };
    let grpc_listen = NetAddr {
        host: "127.0.0.1".to_owned(),
        port: 51000,
    };
    let mut builder = TrowBuilder::new(
        data_path.to_string(),
        addr,
        grpc_listen,
        host_names,
        allow_prefixes,
        allow_images,
        deny_prefixes,
        deny_images,
        dry_run,
    );
    if !no_tls {
        builder.with_tls(cert_path.to_string(), key_path.to_string());
    }
    if matches.is_present("user") {
        let user = matches.value_of("user").expect("Failed to read user name");
        if matches.is_present("password") {
            let pass = matches.value_of("password").expect("Failed to read user password");
            builder.with_user(user.to_string(), pass.to_string());
        } else if matches.is_present("password-file") {
            let file_name = matches.value_of("password-file").expect(
                "Failed to read user password file");
            let mut file = File::open(file_name).expect(
                &format!("Failed to open password file {}", file_name));
            let mut pass = String::new();
            file.read_to_string(&mut pass).expect(
                &format!("Failed to read password file {}", file_name));
            // Remove final newline if present
            if pass.ends_with('\n') {
                pass.pop();
                if pass.ends_with('\r') {
                    pass.pop();
                }
            }
            builder.with_user(user.to_string(), pass);
        } else {
            eprintln!("Either --password or --password-file must be set if --user is set");
            std::process::exit(1);
        }
    }

    builder.start().unwrap_or_else(|e| {
        eprintln!("Error launching Trow:\n\n{}", e);
        std::process::exit(1);
    });
}
parse_list
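The `parse_list` helper completed above accepts either comma- or whitespace-separated values by normalizing commas to spaces before splitting. A short standalone usage sketch (the inputs are hypothetical):

fn parse_list(names: &str) -> Vec<String> {
    // Normalize commas to spaces, then split on any run of whitespace.
    names.replace(",", " ").split_whitespace().map(|x| x.to_owned()).collect()
}

fn main() {
    assert_eq!(parse_list("quay.io/coreos,docker.io"), vec!["quay.io/coreos", "docker.io"]);
    assert_eq!(parse_list("a b  c"), vec!["a", "b", "c"]);
    assert!(parse_list("").is_empty());
}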
remoteKernelConnectionHandler.ts
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. import { inject, injectable } from 'inversify'; import { Disposable, NotebookDocument } from 'vscode'; import { IVSCodeNotebookController } from './types'; import { INotebookControllerManager } from '../types'; import { IExtensionSyncActivationService } from '../../platform/activation/types'; import { IDisposableRegistry } from '../../platform/common/types'; import { noop } from '../../platform/common/utils/misc'; import { traceInfo } from '../../platform/logging'; import { IKernel, IKernelProvider, isLocalConnection } from '../../kernels/types'; import { PreferredRemoteKernelIdProvider } from '../../kernels/jupyter/preferredRemoteKernelIdProvider'; import { ILiveRemoteKernelConnectionUsageTracker } from '../../kernels/jupyter/types'; @injectable() export class
implements IExtensionSyncActivationService { constructor( @inject(IDisposableRegistry) private readonly disposables: IDisposableRegistry, @inject(IKernelProvider) private readonly kernelProvider: IKernelProvider, @inject(INotebookControllerManager) private readonly controllers: INotebookControllerManager, @inject(ILiveRemoteKernelConnectionUsageTracker) private readonly liveKernelTracker: ILiveRemoteKernelConnectionUsageTracker, @inject(PreferredRemoteKernelIdProvider) private readonly preferredRemoteKernelIdProvider: PreferredRemoteKernelIdProvider ) {} activate(): void { this.kernelProvider.onDidStartKernel(this.onDidStartKernel, this, this.disposables); this.controllers.onNotebookControllerSelectionChanged( this.onNotebookControllerSelectionChanged, this, this.disposables ); } private onNotebookControllerSelectionChanged({ selected, notebook, controller }: { selected: boolean; notebook: NotebookDocument; controller: IVSCodeNotebookController; }) { if (controller.connection.kind === 'connectToLiveRemoteKernel' && controller.connection.kernelModel.id) { if (selected) { this.liveKernelTracker.trackKernelIdAsUsed( notebook.uri, controller.connection.serverId, controller.connection.kernelModel.id ); } else { this.liveKernelTracker.trackKernelIdAsNotUsed( notebook.uri, controller.connection.serverId, controller.connection.kernelModel.id ); } } if (isLocalConnection(controller.connection)) { this.preferredRemoteKernelIdProvider.clearPreferredRemoteKernelId(notebook.uri).catch(noop); } } private onDidStartKernel(kernel: IKernel) { if (kernel.creator !== 'jupyterExtension' || !kernel.resourceUri) { return; } const resource = kernel.resourceUri; if (kernel.kernelConnectionMetadata.kind === 'startUsingRemoteKernelSpec') { const serverId = kernel.kernelConnectionMetadata.serverId; const subscription = kernel.kernelSocket.subscribe((info) => { const kernelId = info?.options.id; if (!kernel.disposed && !kernel.disposing && kernelId) { traceInfo(`Updating preferred kernel for remote notebook ${kernelId}`); this.preferredRemoteKernelIdProvider.storePreferredRemoteKernelId(resource, kernelId).catch(noop); this.liveKernelTracker.trackKernelIdAsUsed(resource, serverId, kernelId); } }); this.disposables.push(new Disposable(() => subscription.unsubscribe())); } } }
RemoteKernelConnectionHandler
partition_vec.rs
//! A [disjoint-sets/union-find] implementation of a vector partitioned in sets. //! //! See [`PartitionVec<T>`] for more information. //! //! [disjoint-sets/union-find]: https://en.wikipedia.org/wiki/Disjoint-set_data_structure //! [`PartitionVec<T>`]: struct.PartitionVec.html use { std::{ ops, cmp::Ordering, iter::{ FromIterator, FusedIterator, }, }, crate::{ disjoint_sets::metadata::Metadata, extend_mut, }, }; #[cfg(feature = "rayon")] use rayon::prelude::*; #[cfg(feature = "proptest")] use proptest::prelude::*; /// A [disjoint-sets/union-find] implementation of a vector partitioned in sets. /// /// Most methods that are defined on a `Vec` also work on a `PartitionVec`. /// In addition to this each element stored in the `PartitionVec` is a member of a set. /// Initially each element has its own set but sets can be joined with the `union` method. /// /// In addition to the normal implementation we store an additional index for each element. /// These indices form a circular linked list of the set the element is in. /// This allows for fast iteration of the set using the `set` method /// and is used to speed up the performance of other methods. /// /// This implementation chooses not to expose the `find` method and instead has a `same_set` method. /// This is so that the representative of the set stays an implementation detail which gives /// us more freedom to change it behind the scenes for improved performance. /// /// # Examples /// /// ``` /// # #[macro_use] /// # extern crate partitions; /// # /// # fn main() { /// let mut partition_vec = partition_vec!['a', 'b', 'c', 'd']; /// partition_vec.union(1, 2); /// partition_vec.union(2, 3); /// /// assert!(partition_vec.same_set(1, 3)); /// /// for (index, &value) in partition_vec.set(1) { /// assert!(index >= 1); /// assert!(index <= 3); /// assert!(value != 'a'); /// } /// # } /// ``` /// /// [disjoint-sets/union-find]: https://en.wikipedia.org/wiki/Disjoint-set_data_structure #[derive(Clone)] pub struct PartitionVec<T> { /// Each index has a value. /// We store these in a separate `Vec` so we can easily dereference it to a slice. data: Vec<T>, /// The metadata for each value, this `Vec` will always have the same size as `values`. meta: Vec<Metadata>, } /// Creates a [`PartitionVec`] containing the arguments. 
///
/// There are three forms of the `partition_vec!` macro:
///
/// - Create a [`PartitionVec`] containing a given list of elements all in distinct sets:
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let partition_vec = partition_vec!['a', 'b', 'c'];
///
/// assert!(partition_vec[0] == 'a');
/// assert!(partition_vec[1] == 'b');
/// assert!(partition_vec[2] == 'c');
///
/// assert!(partition_vec.is_singleton(0));
/// assert!(partition_vec.is_singleton(1));
/// assert!(partition_vec.is_singleton(2));
/// # }
/// ```
///
/// - Create a [`PartitionVec`] containing a given list of elements in the sets specified:
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let partition_vec = partition_vec![
///     'a' => 0,
///     'b' => 1,
///     'c' => 2,
///     'd' => 1,
///     'e' => 0,
/// ];
///
/// assert!(partition_vec[0] == 'a');
/// assert!(partition_vec[1] == 'b');
/// assert!(partition_vec[2] == 'c');
/// assert!(partition_vec[3] == 'd');
/// assert!(partition_vec[4] == 'e');
///
/// assert!(partition_vec.same_set(0, 4));
/// assert!(partition_vec.same_set(1, 3));
/// assert!(partition_vec.is_singleton(2));
/// # }
/// ```
///
/// You can use any identifiers that implement `Hash` and `Eq`.
/// Elements with the same set identifiers will be placed in the same set.
/// These identifiers will only be used when constructing a [`PartitionVec`]
/// and will not be stored further.
/// This means `println!("{:?}", partition_vec![3 => 'a', 1 => 'a'])` will display `[3 => 0, 1 => 0]`.
///
/// - Create a [`PartitionVec`] of distinct sets from a given element and size:
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let partition_vec = partition_vec!['a'; 3];
///
/// assert!(partition_vec[0] == 'a');
/// assert!(partition_vec[1] == 'a');
/// assert!(partition_vec[2] == 'a');
///
/// assert!(partition_vec.is_singleton(0));
/// assert!(partition_vec.is_singleton(1));
/// assert!(partition_vec.is_singleton(2));
/// # }
/// ```
///
/// [`PartitionVec`]: partition_vec/struct.PartitionVec.html
#[macro_export]
macro_rules! partition_vec {
    ($elem: expr; $len: expr) => {
        $crate::PartitionVec::from_elem($elem, $len);
    };
    ($($elem: expr),*) => {
        {
            let len = partitions_count_expr![$($elem),*];
            let mut partition_vec = $crate::PartitionVec::with_capacity(len);
            $(
                partition_vec.push($elem);
            )*
            partition_vec
        }
    };
    ($($elem: expr,)*) => {
        partition_vec![$($elem),*];
    };
    ($($elem: expr => $set: expr),*) => {
        {
            let len = partitions_count_expr![$($elem),*];
            let mut partition_vec = $crate::PartitionVec::with_capacity(len);
            let mut map = ::std::collections::HashMap::new();
            $(
                let last_index = partition_vec.len();
                partition_vec.push($elem);

                if let Some(&index) = map.get(&$set) {
                    partition_vec.union(index, last_index);
                } else {
                    map.insert($set, last_index);
                }
            )*
            partition_vec
        }
    };
    ($($elem: expr => $set: expr,)*) => {
        partition_vec![$($elem => $set),*];
    }
}

impl<T> PartitionVec<T> {
    /// Constructs a new, empty `PartitionVec<T>`.
    ///
    /// The `PartitionVec<T>` will not allocate until elements are pushed onto it.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(unused_mut)]
    /// use partitions::PartitionVec;
    ///
    /// let mut partition_vec: PartitionVec<()> = PartitionVec::new();
    /// ```
    #[inline]
    pub fn new() -> Self {
        Self {
            data: Vec::new(),
            meta: Vec::new(),
        }
    }

    /// Constructs a new, empty `PartitionVec<T>` with the specified capacity.
/// /// The `PartitionVec<T>` will be able to hold exactly `capacity` /// elements without reallocating. /// If capacity is 0, the partition_vec will not allocate. /// /// # Examples /// /// ``` /// use partitions::PartitionVec; /// /// let mut partition_vec = PartitionVec::with_capacity(10); /// /// assert!(partition_vec.len() == 0); /// assert!(partition_vec.capacity() == 10); /// /// // This can be done without reallocating. /// for i in 0 .. 10 { /// partition_vec.push(i); /// } /// /// // We can add more elements but this will reallocate. /// partition_vec.push(11); /// ``` #[inline] pub fn with_capacity(capacity: usize) -> Self { Self { data: Vec::with_capacity(capacity), meta: Vec::with_capacity(capacity), } } /// Joins the sets of the `first_index` and the `second_index`. /// /// This method will be executed in `O(α(n))` time where `α` is the inverse /// Ackermann function. The inverse Ackermann function has value below 5 /// for any value of `n` that can be written in the physical universe. /// /// # Panics /// /// If `first_index` or `second_index` is out of bounds. /// /// # Examples /// /// ``` /// # #[macro_use] /// # extern crate partitions; /// # /// # fn main() { /// let mut partition_vec = partition_vec![(); 4]; /// /// // All elements start out in their own sets. /// assert!(partition_vec.len_of_set(0) == 1); /// assert!(partition_vec.len_of_set(1) == 1); /// assert!(partition_vec.len_of_set(2) == 1); /// assert!(partition_vec.len_of_set(3) == 1); /// /// partition_vec.union(1, 2); /// /// // Now 1 and 2 share a set. /// assert!(partition_vec.len_of_set(0) == 1); /// assert!(partition_vec.len_of_set(1) == 2); /// assert!(partition_vec.len_of_set(2) == 2); /// assert!(partition_vec.len_of_set(3) == 1); /// /// partition_vec.union(2, 3); /// /// // We added 3 to the existing set with 1 and 2. /// assert!(partition_vec.len_of_set(0) == 1); /// assert!(partition_vec.len_of_set(1) == 3); /// assert!(partition_vec.len_of_set(2) == 3); /// assert!(partition_vec.len_of_set(3) == 3); /// # } /// ``` pub fn union(&mut self, first_index: usize, second_index: usize) { let i = self.find(first_index); let j = self.find(second_index); if i == j { return } // We swap the values of the links. let link_i = self.meta[i].link(); let link_j = self.meta[j].link(); self.meta[i].set_link(link_j); self.meta[j].set_link(link_i); // We add to the tree with the highest rank. match Ord::cmp(&self.meta[i].rank(), &self.meta[j].rank()) { Ordering::Less => { self.meta[i].set_parent(j); }, Ordering::Equal => { // We add the first tree to the second tree. self.meta[i].set_parent(j); // The second tree becomes larger. self.meta[j].set_rank(self.meta[j].rank() + 1); }, Ordering::Greater => { self.meta[j].set_parent(i); }, } } /// Returns `true` if `first_index` and `second_index` are in the same set. /// /// This method will be executed in `O(α(n))` time where `α` is the inverse /// Ackermann function. /// /// # Panics /// /// If `first_index` or `second_index` are out of bounds. 
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// # fn main() {
/// let mut partition_vec = partition_vec![(); 4];
///
/// partition_vec.union(1, 3);
/// partition_vec.union(0, 1);
///
/// assert!(partition_vec.same_set(0, 1));
/// assert!(!partition_vec.same_set(0, 2));
/// assert!(partition_vec.same_set(0, 3));
/// assert!(!partition_vec.same_set(1, 2));
/// assert!(partition_vec.same_set(1, 3));
/// assert!(!partition_vec.same_set(2, 3));
/// # }
/// ```
#[inline]
pub fn same_set(&self, first_index: usize, second_index: usize) -> bool {
    self.find(first_index) == self.find(second_index)
}

/// Returns `true` if `first_index` and `second_index` are in different sets.
///
/// This method will be executed in `O(α(n))` time where `α` is the inverse
/// Ackermann function.
///
/// # Panics
///
/// If `first_index` or `second_index` are out of bounds.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// # fn main() {
/// let mut partition_vec = partition_vec![(); 4];
///
/// partition_vec.union(1, 3);
/// partition_vec.union(0, 1);
///
/// assert!(!partition_vec.other_sets(0, 1));
/// assert!(partition_vec.other_sets(0, 2));
/// assert!(!partition_vec.other_sets(0, 3));
/// assert!(partition_vec.other_sets(1, 2));
/// assert!(!partition_vec.other_sets(1, 3));
/// assert!(partition_vec.other_sets(2, 3));
/// # }
/// ```
#[inline]
pub fn other_sets(&self, first_index: usize, second_index: usize) -> bool {
    self.find(first_index) != self.find(second_index)
}

/// Will remove `index` from its set while leaving the other members in it.
///
/// After this `index` will be the only element of its set.
/// This won't change the `PartitionVec<T>` if `index` is already the only element.
/// This method will be executed in `O(m)` time where `m` is the size of the set of `index`.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![
///     () => 'a',
///     () => 'a',
///     () => 'a',
///     () => 'b',
/// ];
///
/// // 0, 1, and 2 share a set.
/// assert!(partition_vec.len_of_set(0) == 3);
/// assert!(partition_vec.len_of_set(1) == 3);
/// assert!(partition_vec.len_of_set(2) == 3);
/// assert!(partition_vec.len_of_set(3) == 1);
///
/// partition_vec.make_singleton(2);
///
/// // Now 2 has its own set, and 0 and 1 still share a set.
/// assert!(partition_vec.len_of_set(0) == 2);
/// assert!(partition_vec.len_of_set(1) == 2);
/// assert!(partition_vec.len_of_set(2) == 1);
/// assert!(partition_vec.len_of_set(3) == 1);
/// # }
/// ```
pub fn make_singleton(&mut self, index: usize) {
    let mut current = self.meta[index].link();

    if current != index {
        // We make this the new root.
        let root = current;
        self.meta[root].set_rank(1);

        // All parents except for the last are updated.
        while self.meta[current].link() != index {
            self.meta[current].set_parent(root);
            current = self.meta[current].link();
        }

        // We change the last parent and link.
        self.meta[current].set_parent(root);
        self.meta[current].set_link(root);
    }

    self.meta[index] = Metadata::new(index);
}

/// Returns `true` if `index` is the only element of its set.
///
/// This will be done in `O(1)` time.
///
/// # Panics
///
/// If `index` is out of bounds.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![(); 4];
///
/// partition_vec.union(1, 3);
///
/// assert!(partition_vec.is_singleton(0));
/// assert!(!partition_vec.is_singleton(1));
/// assert!(partition_vec.is_singleton(2));
/// assert!(!partition_vec.is_singleton(3));
/// # }
/// ```
#[inline]
pub fn is_singleton(&self, index: usize) -> bool {
    self.meta[index].link() == index
}

/// Returns the amount of elements in the set that `index` belongs to.
///
/// This will be done in `O(m)` time where `m` is the size of the set that `index` belongs to.
///
/// # Panics
///
/// If `index` is out of bounds.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![true; 3];
///
/// assert!(partition_vec.len_of_set(0) == 1);
/// assert!(partition_vec.len_of_set(1) == 1);
/// assert!(partition_vec.len_of_set(2) == 1);
///
/// partition_vec.union(0, 2);
///
/// assert!(partition_vec.len_of_set(0) == 2);
/// assert!(partition_vec.len_of_set(1) == 1);
/// assert!(partition_vec.len_of_set(2) == 2);
/// # }
/// ```
pub fn len_of_set(&self, index: usize) -> usize {
    let mut current = self.meta[index].link();
    let mut count = 1;

    while current != index {
        current = self.meta[current].link();
        count += 1;
    }

    count
}

/// Returns the amount of sets in the `PartitionVec<T>`.
///
/// This method will be executed in `O(n α(n))` where `α` is the inverse Ackermann function.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let partition_vec = partition_vec![
///     8 => 0,
///     3 => 1,
///     4 => 0,
///     3 => 1,
///     7 => 2,
/// ];
///
/// assert!(partition_vec.amount_of_sets() == 3);
/// # }
/// ```
pub fn amount_of_sets(&self) -> usize {
    let mut done = bit_vec![false; self.len()];
    let mut count = 0;

    for i in 0 .. self.len() {
        if !done.get(self.find(i)).unwrap() {
            done.set(self.find(i), true);
            count += 1;
        }
    }

    count
}

/// Gives the representative of the set that `index` belongs to.
///
/// This method will be executed in `O(α(n))` time where `α` is the inverse
/// Ackermann function. Each index of a set
/// will give the same value. To see if two indexes point to values in
/// the same subset, compare the results of `find`.
///
/// This method is private to keep the representative of the set an implementation
/// detail, this gives greater freedom to change the representative of the set.
///
/// # Panics
///
/// If `index` is out of bounds.
pub(crate) fn find(&self, index: usize) -> usize {
    // If the node is its own parent we have found the root.
    if self.meta[index].parent() == index {
        index
    } else {
        // This method is recursive so each parent on the way to the root is updated.
        let root = self.find(self.meta[index].parent());

        // We update the parent to the root for a lower tree.
        self.meta[index].set_parent(root);

        root
    }
}

/// Gives the representative of the set that `index` belongs to.
///
/// This method is slightly faster than `find` but still `O(α(n))` time.
/// This method won't update the parents while finding the representative and should
/// only be used if the parents will be updated immediately afterwards.
///
/// # Panics
///
/// If `index` is out of bounds.
#[inline]
pub(crate) fn find_final(&self, mut index: usize) -> usize {
    while index != self.meta[index].parent() {
        index = self.meta[index].parent();
    }

    index
}

/// Returns the number of elements the `PartitionVec<T>` can hold without reallocating.
///
/// # Examples
///
/// ```
/// let mut partition_vec = partitions::PartitionVec::with_capacity(6);
///
/// for i in 0 .. 6 {
///     partition_vec.push(i);
/// }
///
/// assert!(partition_vec.capacity() == 6);
///
/// partition_vec.push(6);
///
/// assert!(partition_vec.capacity() >= 7);
/// ```
#[inline]
pub fn capacity(&self) -> usize {
    usize::min(self.data.capacity(), self.meta.capacity())
}

/// Appends an element to the back of the `PartitionVec<T>`.
///
/// This element has its own disjoint set.
///
/// # Panics
///
/// Panics if the number of elements in the `PartitionVec<T>` overflows a `usize`.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![
///     'a' => 0,
///     'b' => 0,
///     'c' => 1,
///     'd' => 2,
/// ];
///
/// partition_vec.push('e');
///
/// assert!(partition_vec.amount_of_sets() == 4);
/// assert!(partition_vec[4] == 'e');
/// # }
/// ```
#[inline]
pub fn push(&mut self, elem: T) {
    let old_len = self.len();

    self.data.push(elem);
    self.meta.push(Metadata::new(old_len));
}

/// Removes the last element and returns it, or `None` if the `PartitionVec<T>` is empty.
///
/// This will be done in `O(m)` time where `m` is the size of the set
/// that the last element belongs to.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![
///     'a' => 0,
///     'b' => 0,
///     'c' => 1,
///     'd' => 0,
/// ];
///
/// assert!(partition_vec.pop() == Some('d'));
///
/// assert!(partition_vec.amount_of_sets() == 2);
/// assert!(partition_vec.len() == 3);
/// # }
/// ```
pub fn pop(&mut self) -> Option<T> {
    // Guard against an empty collection; the docs promise `None` here instead
    // of the underflow panic that `self.data.len() - 1` would otherwise cause.
    if self.data.is_empty() {
        return None;
    }

    let last_index = self.data.len() - 1;
    self.make_singleton(last_index);
    self.meta.pop();

    Some(self.data.pop().unwrap())
}

/// Inserts an element at `index` within the `PartitionVec<T>`, shifting all
/// elements after it to the right.
///
/// This will take `O(n)` time.
///
/// # Panics
///
/// Panics if `index` is out of bounds.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![
///     0 => 0,
///     1 => 1,
///     2 => 0,
///     3 => 2,
/// ];
///
/// partition_vec.insert(2, -1);
///
/// assert!(partition_vec[2] == -1);
/// assert!(partition_vec.amount_of_sets() == 4);
/// # }
/// ```
pub fn insert(&mut self, index: usize, elem: T) {
    // We update the parents and links above the new value.
    for i in 0 .. self.meta.len() {
        let parent = self.meta[i].parent();
        if parent >= index {
            self.meta[i].set_parent(parent + 1);
        }

        let link = self.meta[i].link();
        if link >= index {
            self.meta[i].set_link(link + 1);
        }
    }

    self.data.insert(index, elem);
    self.meta.insert(index, Metadata::new(index));
}

/// Removes and returns the element at position `index` within the `PartitionVec<T>`,
/// shifting all elements after it to the left.
///
/// This will take `O(n + m)` time where `m` is the size of the set that `index` belongs to.
///
/// # Panics
///
/// Panics if `index` is out of bounds.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![
///     0 => 0,
///     1 => 1,
///     2 => 0,
///     3 => 2,
/// ];
///
/// assert!(partition_vec.remove(2) == 2);
///
/// assert!(partition_vec[2] == 3);
/// assert!(partition_vec.amount_of_sets() == 3);
/// # }
/// ```
pub fn remove(&mut self, index: usize) -> T {
    self.make_singleton(index);
    self.meta.remove(index);

    // We lower all values that point above the index.
    for i in 0 .. self.meta.len() {
        let parent = self.meta[i].parent();
        if parent > index {
            self.meta[i].set_parent(parent - 1);
        }

        let link = self.meta[i].link();
        if link > index {
            self.meta[i].set_link(link - 1);
        }
    }

    self.data.remove(index)
}

/// Moves all the elements of `other` into `self`, leaving `other` empty.
///
/// # Panics
///
/// Panics if the number of elements in the `PartitionVec<T>` overflows a `usize`.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut first = partition_vec![
///     'a' => 0,
///     'b' => 1,
///     'c' => 1,
/// ];
/// let mut second = partition_vec![
///     'a' => 0,
///     'b' => 0,
///     'c' => 1,
/// ];
///
/// first.append(&mut second);
///
/// assert!(first.len() == 6);
/// assert!(second.len() == 0);
///
/// assert!(first.amount_of_sets() == 4);
/// assert!(second.amount_of_sets() == 0);
/// # }
/// ```
pub fn append(&mut self, other: &mut Self) {
    let old_len = self.len();

    self.data.append(&mut other.data);
    self.meta.extend(other.meta.drain(..).map(|meta| {
        let old_parent = meta.parent();
        meta.set_parent(old_parent + old_len);

        let old_link = meta.link();
        meta.set_link(old_link + old_len);

        meta
    }));
}

/// Reserves capacity for at least `additional` more elements to be
/// inserted in the given `PartitionVec<T>`.
/// The collection may reserve more space to avoid frequent reallocations.
/// After calling `reserve`, capacity will be greater than
/// or equal to `self.len() + additional`.
/// Does nothing if capacity is already sufficient.
///
/// # Panics
///
/// Panics if the new capacity overflows a `usize`.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![1];
/// partition_vec.reserve(10);
/// assert!(partition_vec.capacity() >= 11);
/// # }
/// ```
#[inline]
pub fn reserve(&mut self, additional: usize) {
    self.data.reserve(additional);
    self.meta.reserve(additional);
}

/// Reserves the minimum capacity for exactly `additional` more elements to be
/// inserted in the given `PartitionVec<T>`.
/// After calling `reserve_exact`, capacity will be greater than or
/// equal to `self.len() + additional`.
/// Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it requests.
/// Therefore capacity cannot be relied upon to be precisely minimal.
/// Prefer `reserve` if future insertions are expected.
///
/// # Panics
///
/// Panics if the new capacity overflows a `usize`.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![1];
/// partition_vec.reserve_exact(10);
/// assert!(partition_vec.capacity() >= 11);
/// # }
/// ```
#[inline]
pub fn reserve_exact(&mut self, additional: usize) {
    self.data.reserve_exact(additional);
    self.meta.reserve_exact(additional);
}

/// Shrinks the capacity of the `PartitionVec<T>` as much as possible.
///
/// It will drop down as close as possible to the length but the allocator
/// may still inform the `PartitionVec<T>` that there is space for a few more
/// elements.
///
/// # Examples
///
/// ```
/// let mut partition_vec = partitions::PartitionVec::with_capacity(10);
///
/// partition_vec.extend([1, 2, 3].iter().cloned());
///
/// assert!(partition_vec.capacity() == 10);
///
/// partition_vec.shrink_to_fit();
///
/// assert!(partition_vec.capacity() >= 3);
/// ```
#[inline]
pub fn shrink_to_fit(&mut self) {
    self.data.shrink_to_fit();
    self.meta.shrink_to_fit();
}

/// Shortens the `PartitionVec<T>`, keeping the first `new_len` elements and
/// dropping the rest.
///
/// If `new_len` is greater than or equal to the collection's current length,
/// this has no effect.
///
/// Note that this method has no effect on the allocated capacity of the
/// collection.
///
/// # Examples
///
/// ```
/// # #[macro_use]
/// # extern crate partitions;
/// #
/// # fn main() {
/// let mut partition_vec = partition_vec![
///     'a' => 0,
///     'b' => 1,
///     'c' => 0,
///     'd' => 1,
///     'e' => 2,
/// ];
///
/// partition_vec.truncate(3);
/// assert!(partition_vec.len() == 3);
/// assert!(partition_vec.capacity() == 5);
/// assert!(partition_vec.len_of_set(0) == 2);
/// assert!(partition_vec.len_of_set(1) == 1);
/// assert!(partition_vec.len_of_set(2) == 2);
/// # }
/// ```
pub fn truncate(&mut self, new_len: usize) {
    if new_len >= self.len() {
        return
    }

    for i in 0 .. new_len {
        let parent = self.meta[i].parent();
        let mut current = self.meta[i].link();

        if parent >= new_len {
            // We make `i` the new root.
            self.meta[i].set_parent(i);
            self.meta[i].set_rank(1);

            let mut previous = i;
            // The last index we saw before we went out of the new bounds.
            let mut index_before_oob = if current >= new_len {
                Some(previous)
            } else {
                None
            };

            while current != i {
                if current >= new_len {
                    // If the current is above the new length we update this value if needed.
                    if index_before_oob.is_none() {
                        index_before_oob = Some(previous);
                    }
                } else if let Some(index) = index_before_oob {
                    // If we are back in bounds for the first time we update the link.
                    self.meta[index].set_link(current);
                    index_before_oob = None;
                }

                self.meta[current].set_parent(i);
                previous = current;
                current = self.meta[current].link();
            }

            if let Some(index) = index_before_oob {
                self.meta[index].set_link(i);
            }
        } else if current >= new_len {
            while current >= new_len {
                current = self.meta[current].link();
            }

            self.meta[i].set_link(current);
        }
    }

    self.data.truncate(new_len);
    self.meta.truncate(new_len);
}

/// Resizes the `PartitionVec<T>` in-place so that `len` is equal to `new_len`.
///
/// If `new_len` is greater than `len`, the collection is extended by the
/// difference, with each additional slot filled with `value`.
/// If `new_len` is less than `len`, the collection is simply truncated.
/// /// # Examples /// /// ``` /// # #[macro_use] /// # extern crate partitions; /// # /// # fn main() { /// let mut partition_vec = partition_vec![4, 9]; /// partition_vec.resize(4, 0); /// assert!(partition_vec.as_slice() == &[4, 9, 0, 0]); /// /// let mut partition_vec = partition_vec![ /// 4 => 0, /// 1 => 1, /// 3 => 5, /// 1 => 1, /// 1 => 3, /// ]; /// partition_vec.resize(2, 0); /// assert!(partition_vec.as_slice() == &[4, 1]); /// # } /// ``` #[inline] pub fn resize(&mut self, new_len: usize, value: T) where T: Clone { let len = self.len(); match Ord::cmp(&new_len, &len) { Ordering::Less => self.truncate(new_len), Ordering::Equal => {}, Ordering::Greater => { self.data.append(&mut vec![value; new_len - len]); self.meta.extend((len .. new_len).map(Metadata::new)); } } } /// Clears the `PartitionVec<T>`, removing all values. /// /// Note that this method has no effect on the allocated capacity of the collection. /// /// # Examples /// /// ``` /// # #[macro_use] /// # extern crate partitions; /// # /// # fn main() { /// let mut partition_vec = partition_vec![2, 3, 4]; /// assert!(!partition_vec.is_empty()); /// partition_vec.clear(); /// assert!(partition_vec.is_empty()); /// # } /// ``` #[inline] pub fn clear(&mut self) { self.data.clear(); self.meta.clear(); } /// Returns `true` if the partition_vec contains no elements. /// /// # Examples /// /// ``` /// let mut partition_vec = partitions::PartitionVec::new(); /// assert!(partition_vec.is_empty()); /// /// partition_vec.push(1); /// assert!(!partition_vec.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool {
Converts the `PartitionVec<T>` into `Box<[T]>`.
    ///
    /// Note that this will drop any excess capacity.
    /// This will not take the sets of the `PartitionVec<T>` into account at all.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut partition_vec = partitions::PartitionVec::with_capacity(10);
    /// partition_vec.extend([1, 2, 3].iter().cloned());
    ///
    /// assert!(partition_vec.capacity() == 10);
    /// let slice = partition_vec.into_boxed_slice();
    /// assert!(slice.into_vec().capacity() == 3);
    /// ```
    #[inline]
    pub fn into_boxed_slice(self) -> Box<[T]> {
        self.data.into_boxed_slice()
    }

    /// Extracts a slice containing the entire `PartitionVec<T>`.
    ///
    /// Equivalent to `&partition_vec[..]`.
    /// This will not take the sets of the `PartitionVec<T>` into account at all.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use]
    /// # extern crate partitions;
    /// #
    /// # fn main() {
    /// use std::io::{self, Write};
    /// let buffer = partition_vec![1, 2, 3, 4, 5];
    /// io::sink().write(buffer.as_slice()).unwrap();
    /// # }
    /// ```
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        self.data.as_slice()
    }

    /// Extracts a mutable slice containing the entire `PartitionVec<T>`.
    ///
    /// Equivalent to `&mut partition_vec[..]`.
    /// This will not take the sets of the `PartitionVec<T>` into account at all.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use]
    /// # extern crate partitions;
    /// #
    /// # fn main() {
    /// use std::io::{self, Read};
    /// let mut buffer = partition_vec![0; 3];
    /// io::repeat(0b101).read_exact(buffer.as_mut_slice()).unwrap();
    /// # }
    /// ```
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        self.data.as_mut_slice()
    }

    /// Returns an iterator over the elements of the set that `index` belongs to.
    ///
    /// The iterator returned yields pairs `(i, &value)` where `i` is the index of the value and
    /// `value` is the value itself.
    ///
    /// The order the elements are returned in is not specified.
    ///
    /// # Panics
    ///
    /// If `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use]
    /// # extern crate partitions;
    /// #
    /// # fn main() {
    /// let partition_vec = partition_vec![
    ///     'a' => "first set",
    ///     'b' => "first set",
    ///     'c' => "second set",
    ///     'd' => "second set",
    /// ];
    ///
    /// let mut done = [0, 0, 0, 0];
    /// for (index, value) in partition_vec.set(0) {
    ///     assert!(*value == 'a' || *value == 'b');
    ///     done[index] += 1;
    /// }
    /// for (index, value) in partition_vec.set(1) {
    ///     assert!(*value == 'a' || *value == 'b');
    ///     done[index] += 1;
    /// }
    /// for (index, value) in partition_vec.set(2) {
    ///     assert!(*value == 'c' || *value == 'd');
    ///     done[index] += 1;
    /// }
    /// // We visited the first set twice and the second set once.
    /// assert!(done == [2, 2, 1, 1]);
    /// # }
    /// ```
    #[inline]
    pub fn set(&self, index: usize) -> Set<T> {
        let root = self.find_final(index);
        self.meta[root].set_rank(1);

        Set {
            partition_vec: self,
            current: Some(root),
            root,
        }
    }

    /// Returns an iterator over the elements of the set that `index` belongs to.
    ///
    /// The iterator returned yields pairs `(i, &mut value)` where `i` is the index of the value and
    /// `value` is the value itself.
    ///
    /// The order the elements are returned in is not specified.
    ///
    /// # Panics
    ///
    /// If `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use]
    /// # extern crate partitions;
    /// #
    /// # fn main() {
    /// let mut partition_vec = partition_vec![
    ///     0 => 'a',
    ///     0 => 'b',
    ///     0 => 'b',
    ///     0 => 'c',
    /// ];
    ///
    /// assert!(partition_vec.as_slice() == &[0, 0, 0, 0]);
    /// for (index, value) in partition_vec.set_mut(2) {
    ///     assert!(index == 1 || index == 2);
    ///     *value += 1;
    /// }
    /// assert!(partition_vec.as_slice() == &[0, 1, 1, 0]);
    /// # }
    /// ```
    #[inline]
    pub fn set_mut(&mut self, index: usize) -> SetMut<T> {
        let root = self.find_final(index);
        self.meta[root].set_rank(1);

        SetMut {
            partition_vec: self,
            current: Some(root),
            root,
        }
    }

    /// Returns an iterator over all sets of the `PartitionVec<T>`.
    ///
    /// The iterator returned yields `Set` iterators.
    /// These `Set` iterators yield pairs `(i, &value)` where `i` is the index of
    /// the value and `value` is the value itself.
    ///
    /// The sets are returned in order by their first member.
    /// The order the elements of a `Set` are returned in is not specified.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use]
    /// # extern crate partitions;
    /// #
    /// # fn main() {
    /// let partition_vec = partition_vec![
    ///     0 => 'a',
    ///     0 => 'a',
    ///     2 => 'b',
    ///     2 => 'b',
    ///     4 => 'c',
    ///     4 => 'c',
    /// ];
    ///
    /// for set in partition_vec.all_sets() {
    ///     let mut count = 0;
    ///     for (index, value) in set {
    ///         assert!(index == *value || index == *value + 1);
    ///         count += 1;
    ///     }
    ///     assert!(count == 2);
    /// }
    /// # }
    /// ```
    #[inline]
    pub fn all_sets(&self) -> AllSets<T> {
        let len = self.len();

        AllSets {
            partition_vec: self,
            done: bit_vec![false; len],
            range: 0 .. len,
        }
    }

    /// Returns an iterator over all sets of the `PartitionVec<T>`.
    ///
    /// The iterator returned yields `SetMut` iterators.
    /// These `SetMut` iterators yield pairs `(i, &mut value)` where `i` is the index of
    /// the value and `value` is the value itself.
    ///
    /// The sets are returned in order by their first member.
    /// The order the elements of a `SetMut` are returned in is not specified.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use]
    /// # extern crate partitions;
    /// #
    /// # fn main() {
    /// let mut partition_vec = partition_vec![
    ///     0 => 'a',
    ///     0 => 'b',
    ///     0 => 'a',
    ///     0 => 'b',
    ///     0 => 'c',
    ///     0 => 'c',
    /// ];
    ///
    /// assert!(partition_vec.as_slice() == &[0, 0, 0, 0, 0, 0]);
    ///
    /// for (set_number, set_mut) in partition_vec.all_sets_mut().enumerate() {
    ///     for (index, value) in set_mut {
    ///         assert!(index < 6);
    ///         *value = set_number;
    ///     }
    /// }
    ///
    /// assert!(partition_vec.as_slice() == &[0, 1, 0, 1, 2, 2]);
    /// # }
    /// ```
    #[inline]
    pub fn all_sets_mut(&mut self) -> AllSetsMut<T> {
        let len = self.len();

        AllSetsMut {
            partition_vec: self,
            done: bit_vec![false; len],
            range: 0 .. len,
        }
    }

    /// This method is used by the `partition_vec!` macro.
    #[doc(hidden)]
    #[inline]
    pub fn from_elem(elem: T, len: usize) -> Self where T: Clone {
        Self {
            data: vec![elem; len],
            meta: (0 ..
len).map(Metadata::new).collect(),
        }
    }

    pub(crate) unsafe fn set_len(&mut self, len: usize) {
        self.data.set_len(len);
        self.meta.set_len(len);
    }

    pub(crate) unsafe fn insert_over_lazy_removed(&mut self, index: usize, value: T) -> usize {
        let marked_value = self.meta[index].marked_value();

        std::ptr::write(&mut self.data[index], value);
        self.meta[index] = Metadata::new(index);

        marked_value
    }

    pub(crate) unsafe fn lazy_remove(&mut self, index: usize, marked_value: usize) -> T {
        self.make_singleton(index);

        let value = std::ptr::read(&self.data[index]);
        self.meta[index].set_marked_value(marked_value);

        value
    }

    pub(crate) fn clear_lazy_removed(&mut self) {
        for i in 0 .. self.len() {
            // Only the unmarked values are still live and need to be dropped;
            // the marked slots were already logically removed.
            if !self.meta[i].is_marked() {
                unsafe { drop(std::ptr::read(&self.data[i])); }
            }
        }

        unsafe { self.set_len(0); }
    }

    pub(crate) unsafe fn push_lazy_removed(&mut self) {
        let index = self.len();
        self.reserve(1);
        self.set_len(index + 1);
        self.meta[index] = Metadata::new(0);
        self.meta[index].set_marked_value(!0);
    }
}

impl<T> Default for PartitionVec<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T> std::fmt::Debug for PartitionVec<T> where T: std::fmt::Debug {
    fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        // We map the roots to `usize` names.
        let mut map = std::collections::HashMap::with_capacity(self.len());
        let mut builder = formatter.debug_list();
        let mut names = 0;

        for i in 0 .. self.len() {
            let root = self.find(i);

            let name = if let Some(&name) = map.get(&root) {
                // If we already have a name we use it.
                name
            } else {
                // If we don't we make a new name.
                let new_name = names;
                map.insert(root, new_name);
                names += 1;
                new_name
            };

            builder.entry(&format_args!("{:?} => {}", self.data[i], name));
        }

        builder.finish()
    }
}

impl<T> PartialEq for PartitionVec<T> where T: PartialEq {
    fn eq(&self, other: &Self) -> bool {
        if self.len() != other.len() {
            return false
        }

        // We map the roots of self to the roots of other, and the roots of
        // other back to the roots of self; the correspondence between the
        // sets must hold in both directions.
        let mut map = std::collections::HashMap::with_capacity(self.len());
        let mut reverse_map = std::collections::HashMap::with_capacity(self.len());

        for i in 0 .. self.len() {
            if self.data[i] != other.data[i] {
                return false
            }

            let self_root = self.find(i);
            let other_root = other.find(i);

            if let Some(&root) = map.get(&self_root) {
                // If we have seen this root we check if we have the same mapping.
                if root != other_root {
                    return false
                }
            } else {
                // If we have not seen this root we add the relation to the map.
                map.insert(self_root, other_root);
            }

            // The mapping must also be injective; without this check a
            // partition that merely refines the other would compare equal.
            if let Some(&root) = reverse_map.get(&other_root) {
                if root != self_root {
                    return false
                }
            } else {
                reverse_map.insert(other_root, self_root);
            }
        }

        true
    }
}

impl<T> Eq for PartitionVec<T> where T: Eq {}

impl<T, I> ops::Index<I> for PartitionVec<T> where I: std::slice::SliceIndex<[T]> {
    type Output = I::Output;

    #[inline]
    fn index(&self, index: I) -> &I::Output {
        (**self).index(index)
    }
}

impl<T, I> ops::IndexMut<I> for PartitionVec<T> where I: std::slice::SliceIndex<[T]> {
    #[inline]
    fn index_mut(&mut self, index: I) -> &mut I::Output {
        (**self).index_mut(index)
    }
}

impl<T> ops::Deref for PartitionVec<T> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        &self.data
    }
}

impl<T> ops::DerefMut for PartitionVec<T> {
    fn deref_mut(&mut self) -> &mut [T] {
        &mut self.data
    }
}

impl<T> From<Vec<T>> for PartitionVec<T> {
    fn from(vec: Vec<T>) -> Self {
        let len = vec.len();

        Self {
            data: vec,
            meta: (0 .. len).map(Metadata::new).collect(),
        }
    }
}

impl<T> FromIterator<T> for PartitionVec<T> {
    fn from_iter<I>(iter: I) -> Self where I: IntoIterator<Item = T> {
        let data = Vec::from_iter(iter);
        let len = data.len();

        Self {
            data,
            meta: (0 ..
len).map(Metadata::new).collect(),
        }
    }
}

impl<'a, T> FromIterator<&'a T> for PartitionVec<T> where T: Copy + 'a {
    fn from_iter<I>(iter: I) -> Self where I: IntoIterator<Item = &'a T> {
        Self::from_iter(iter.into_iter().cloned())
    }
}

#[cfg(feature = "rayon")]
impl<T> FromParallelIterator<T> for PartitionVec<T> where T: Send {
    fn from_par_iter<I>(par_iter: I) -> Self where I: IntoParallelIterator<Item = T> {
        let par_iter = par_iter.into_par_iter();

        let mut partition = if let Some(len) = par_iter.opt_len() {
            Self::with_capacity(len)
        } else {
            Self::new()
        };

        partition.par_extend(par_iter);
        partition
    }
}

#[cfg(feature = "rayon")]
impl<'a, T> FromParallelIterator<&'a T> for PartitionVec<T> where T: Copy + Send + Sync + 'a {
    fn from_par_iter<I>(par_iter: I) -> Self where I: IntoParallelIterator<Item = &'a T> {
        Self::from_par_iter(par_iter.into_par_iter().cloned())
    }
}

impl<T> IntoIterator for PartitionVec<T> {
    type Item = T;
    type IntoIter = std::vec::IntoIter<T>;

    fn into_iter(self) -> std::vec::IntoIter<T> {
        self.data.into_iter()
    }
}

impl<'a, T> IntoIterator for &'a PartitionVec<T> {
    type Item = &'a T;
    type IntoIter = std::slice::Iter<'a, T>;

    fn into_iter(self) -> std::slice::Iter<'a, T> {
        self.data.iter()
    }
}

impl<'a, T> IntoIterator for &'a mut PartitionVec<T> {
    type Item = &'a mut T;
    type IntoIter = std::slice::IterMut<'a, T>;

    fn into_iter(self) -> std::slice::IterMut<'a, T> {
        self.data.iter_mut()
    }
}

#[cfg(feature = "rayon")]
impl<T> IntoParallelIterator for PartitionVec<T> where T: Send {
    type Item = T;
    type Iter = rayon::vec::IntoIter<T>;

    fn into_par_iter(self) -> Self::Iter {
        self.data.into_par_iter()
    }
}

#[cfg(feature = "rayon")]
impl<'a, T> IntoParallelIterator for &'a PartitionVec<T> where T: Send + Sync {
    type Item = &'a T;
    type Iter = rayon::slice::Iter<'a, T>;

    fn into_par_iter(self) -> Self::Iter {
        self.data.par_iter()
    }
}

#[cfg(feature = "rayon")]
impl<'a, T> IntoParallelIterator for &'a mut PartitionVec<T> where T: Send + Sync {
    type Item = &'a mut T;
    type Iter = rayon::slice::IterMut<'a, T>;

    fn into_par_iter(self) -> Self::Iter {
        self.data.par_iter_mut()
    }
}

impl<T> Extend<T> for PartitionVec<T> {
    fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item = T> {
        let len = self.len();
        self.data.extend(iter);
        let new_len = self.data.len();

        self.meta.extend((len .. new_len).map(Metadata::new));
    }
}

impl<'a, T> Extend<&'a T> for PartitionVec<T> where T: Copy + 'a {
    fn extend<I>(&mut self, iter: I) where I: IntoIterator<Item = &'a T> {
        let len = self.len();
        self.data.extend(iter);
        let new_len = self.data.len();

        self.meta.extend((len .. new_len).map(Metadata::new));
    }
}

#[cfg(feature = "rayon")]
impl<T> ParallelExtend<T> for PartitionVec<T> where T: Send {
    fn par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = T> {
        let par_iter = par_iter.into_par_iter();
        // Remember the old length, mirroring `Extend` above, so the metadata
        // for the new elements starts at the first freshly added index.
        let len = self.len();
        self.data.par_extend(par_iter);
        self.meta.par_extend((len ..
self.data.len()).into_par_iter().map(Metadata::new));
    }
}

#[cfg(feature = "rayon")]
impl<'a, T> ParallelExtend<&'a T> for PartitionVec<T> where T: Copy + Send + Sync + 'a {
    fn par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = &'a T> {
        self.par_extend(par_iter.into_par_iter().cloned())
    }
}

#[cfg(feature = "proptest")]
impl<T> Arbitrary for PartitionVec<T> where
    T: Arbitrary,
    T::Strategy: 'static,
{
    type Parameters = (proptest::collection::SizeRange, T::Parameters);
    type Strategy = BoxedStrategy<Self>;

    fn arbitrary_with(params: Self::Parameters) -> Self::Strategy {
        use std::collections::hash_map;

        let (size_range, params) = params;
        let params = (size_range, (params, ()));

        (Vec::<(T, usize)>::arbitrary_with(params)).prop_map(|vec| {
            let mut partition_vec = Self::with_capacity(vec.len());
            // We map a `set_number` to an `index` of that set.
            let mut map = hash_map::HashMap::with_capacity(vec.len());

            for (index, (value, set_number)) in vec.into_iter().enumerate() {
                partition_vec.push(value);

                let set_number = set_number.trailing_zeros();

                match map.entry(set_number) {
                    hash_map::Entry::Occupied(occupied) => {
                        partition_vec.union(index, *occupied.get());
                    },
                    hash_map::Entry::Vacant(vacant) => {
                        vacant.insert(index);
                    }
                }
            }

            partition_vec
        }).boxed()
    }
}

/// An iterator over a set in a `PartitionVec<T>`.
///
/// This struct is created by the [`set`] method on [`PartitionVec<T>`].
/// See its documentation for more.
///
/// [`set`]: struct.PartitionVec.html#method.set
/// [`PartitionVec<T>`]: struct.PartitionVec.html
#[derive(Clone, Debug)]
pub struct Set<'a, T: 'a> {
    partition_vec: &'a PartitionVec<T>,
    current: Option<usize>,
    root: usize,
}

impl<'a, T> Iterator for Set<'a, T> {
    type Item = (usize, &'a T);

    fn next(&mut self) -> Option<(usize, &'a T)> {
        let current = self.current?;
        self.partition_vec.meta[current].set_parent(self.root);

        let next = self.partition_vec.meta[current].link();

        // We started at the root.
        self.current = if next == self.root {
            None
        } else {
            Some(next)
        };

        Some((current, &self.partition_vec.data[current]))
    }
}

impl<'a, T> FusedIterator for Set<'a, T> {}

/// An iterator over a set in a `PartitionVec<T>` that allows mutating elements.
///
/// This struct is created by the [`set_mut`] method on [`PartitionVec<T>`].
/// See its documentation for more.
///
/// [`set_mut`]: struct.PartitionVec.html#method.set_mut
/// [`PartitionVec<T>`]: struct.PartitionVec.html
#[derive(Debug)]
pub struct SetMut<'a, T: 'a> {
    partition_vec: &'a mut PartitionVec<T>,
    current: Option<usize>,
    root: usize,
}

impl<'a, T> Iterator for SetMut<'a, T> {
    type Item = (usize, &'a mut T);

    fn next(&mut self) -> Option<(usize, &'a mut T)> {
        let current = self.current?;
        self.partition_vec.meta[current].set_parent(self.root);

        let next = self.partition_vec.meta[current].link();

        // We started at the root.
        self.current = if next == self.root {
            None
        } else {
            Some(next)
        };

        // This iterator won't give a reference to this value again so it is safe to extend
        // the lifetime of the mutable reference.
        unsafe { Some((current, extend_mut(&mut self.partition_vec.data[current]))) }
    }
}

impl<'a, T> FusedIterator for SetMut<'a, T> {}

/// An iterator over all sets in a `PartitionVec<T>`.
///
/// This struct is created by the [`all_sets`] method on [`PartitionVec<T>`].
/// See its documentation for more information.
///
/// [`all_sets`]: struct.PartitionVec.html#method.all_sets
/// [`PartitionVec<T>`]: struct.PartitionVec.html
#[derive(Clone, Debug)]
pub struct AllSets<'a, T: 'a> {
    partition_vec: &'a PartitionVec<T>,
    done: bit_vec::BitVec,
    range: ops::Range<usize>,
}

impl<'a, T> Iterator for AllSets<'a, T> {
    type Item = Set<'a, T>;

    fn next(&mut self) -> Option<Set<'a, T>> {
        // We keep going until we find a set we have not returned yet.
        loop {
            let index = self.range.next()?;
            let root = self.partition_vec.find_final(index);

            // If we have not returned this set yet.
            if !self.done.get(root).unwrap() {
                self.done.set(root, true);

                return Some(Set {
                    partition_vec: self.partition_vec,
                    current: Some(root),
                    root,
                })
            }
        }
    }
}

impl<'a, T> DoubleEndedIterator for AllSets<'a, T> {
    fn next_back(&mut self) -> Option<Set<'a, T>> {
        // We keep going until we find a set we have not returned yet.
        loop {
            let index = self.range.next_back()?;
            let root = self.partition_vec.find_final(index);

            // If we have not returned this set yet.
            if !self.done.get(root).unwrap() {
                self.done.set(root, true);

                return Some(Set {
                    partition_vec: self.partition_vec,
                    current: Some(root),
                    root,
                })
            }
        }
    }
}

impl<'a, T> FusedIterator for AllSets<'a, T> {}

/// An iterator over all sets in a `PartitionVec<T>` that allows mutating elements.
///
/// This struct is created by the [`all_sets_mut`] method on [`PartitionVec<T>`].
/// See its documentation for more information.
///
/// [`all_sets_mut`]: struct.PartitionVec.html#method.all_sets_mut
/// [`PartitionVec<T>`]: struct.PartitionVec.html
#[derive(Debug)]
pub struct AllSetsMut<'a, T: 'a> {
    partition_vec: &'a mut PartitionVec<T>,
    done: bit_vec::BitVec,
    range: ops::Range<usize>,
}

impl<'a, T> Iterator for AllSetsMut<'a, T> {
    type Item = SetMut<'a, T>;

    fn next(&mut self) -> Option<SetMut<'a, T>> {
        // We keep going until we find a set we have not returned yet.
        loop {
            let index = self.range.next()?;
            let root = self.partition_vec.find_final(index);

            // If we have not returned this set yet.
            if !self.done.get(root).unwrap() {
                self.done.set(root, true);

                // This is safe because we will not return this set again.
                unsafe { return Some(SetMut {
                    partition_vec: extend_mut(self).partition_vec,
                    current: Some(root),
                    root,
                })}
            }
        }
    }
}

impl<'a, T> DoubleEndedIterator for AllSetsMut<'a, T> {
    fn next_back(&mut self) -> Option<SetMut<'a, T>> {
        // We keep going until we find a set we have not returned yet.
        loop {
            let index = self.range.next_back()?;
            let root = self.partition_vec.find_final(index);

            // If we have not returned this set yet.
            if !self.done.get(root).unwrap() {
                self.done.set(root, true);

                // This is safe because we will not return this set again.
                unsafe { return Some(SetMut {
                    partition_vec: extend_mut(self).partition_vec,
                    current: Some(root),
                    root,
                })}
            }
        }
    }
}

impl<'a, T> FusedIterator for AllSetsMut<'a, T> {}
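// A minimal usage sketch (a hypothetical test, not part of the crate's own
// suite) tying the pieces above together: `union` merges two singleton sets
// and `set`/`amount_of_sets` observe the merge. Every method used here
// (`union`, `amount_of_sets`, `set`, the `FromIterator` impl) appears
// elsewhere in this file or in its doc examples.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn union_merges_sets() {
        // Four values start out as four singleton sets.
        let mut partition_vec: PartitionVec<u32> = (0u32 .. 4).collect();
        assert!(partition_vec.amount_of_sets() == 4);

        partition_vec.union(0, 2);
        assert!(partition_vec.amount_of_sets() == 3);

        // The set containing index 0 now also contains index 2.
        let mut members: Vec<usize> = partition_vec.set(0).map(|(i, _)| i).collect();
        members.sort();
        assert!(members == vec![0, 2]);
    }
}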
self.data.is_empty() } ///
request.rs
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. use crate::{ backoff::{Backoff, NoBackoff, NoJitterBackoff}, pd::PdClient, stats::tikv_stats, store::Store, transaction::{resolve_locks, HasLocks}, BoundRange, Error, ErrorKind, Key, Result, }; use async_trait::async_trait; use futures::{prelude::*, stream::BoxStream}; use std::{ cmp::{max, min}, sync::Arc, }; use tikv_client_store::{HasError, HasRegionError, Request}; const DEFAULT_REGION_BACKOFF: NoJitterBackoff = NoJitterBackoff::new(2, 500, 10); pub const OPTIMISTIC_BACKOFF: NoJitterBackoff = NoJitterBackoff::new(2, 500, 10); pub const PESSIMISTIC_BACKOFF: NoBackoff = NoBackoff; #[async_trait] pub trait KvRequest: Request + Clone + Sync + Send + 'static + Sized { type Result; type RpcResponse: HasError + HasLocks + Clone + Send + 'static; /// A single `KvRequest` can be divided into a number of RPC requests because the keys span /// several regions or a single RPC request is too large. Most of the fields in these requests /// share the same content while `KeyData`, which contains keys (and associated data if any), /// is the part which differs among the requests. type KeyData: Send; async fn execute<Pd: PdClient, B: Backoff>( self, pd_client: Arc<Pd>, lock_backoff: B, ) -> Result<Self::Result> { Self::reduce( self.response_stream(pd_client, lock_backoff) .and_then(|mut response| match response.error() { Some(e) => future::err(e), None => future::ok(response), }) .map_ok(Self::map_result) .boxed(), ) .await } fn response_stream( self, pd_client: Arc<impl PdClient>, lock_backoff: impl Backoff, ) -> BoxStream<'static, Result<Self::RpcResponse>> { self.retry_response_stream(pd_client, DEFAULT_REGION_BACKOFF, lock_backoff) } fn retry_response_stream( mut self, pd_client: Arc<impl PdClient>, region_backoff: impl Backoff, lock_backoff: impl Backoff, ) -> BoxStream<'static, Result<Self::RpcResponse>> { let stores = self.store_stream(pd_client.clone()); stores .and_then(move |(key_data, store)| { let request = self.make_rpc_request(key_data, &store); async move { let request = request?; let stats = tikv_stats(request.label()); let response = store.dispatch::<_, Self::RpcResponse>(&request).await; let response = stats.done(response)?; Ok((request, *response)) } }) .map_ok(move |(request, mut response)| { if let Some(region_error) = response.region_error() { return request.on_region_error( region_error, pd_client.clone(), region_backoff.clone(), lock_backoff.clone(), ); } // Resolve locks let locks = response.take_locks(); if !locks.is_empty() { let pd_client = pd_client.clone(); let region_backoff = region_backoff.clone(); let lock_backoff = lock_backoff.clone(); return resolve_locks(locks, pd_client.clone()) .map_ok(move |resolved| { if !resolved { request.on_resolve_lock_failed( pd_client, region_backoff, lock_backoff, ) } else
                    })
                    .try_flatten_stream()
                    .boxed();
            }

            stream::once(future::ok(response)).boxed()
        })
        .try_flatten()
        .boxed()
    }

    fn on_region_error(
        self,
        region_error: Error,
        pd_client: Arc<impl PdClient>,
        mut region_backoff: impl Backoff,
        lock_backoff: impl Backoff,
    ) -> BoxStream<'static, Result<Self::RpcResponse>> {
        region_backoff.next_delay_duration().map_or(
            stream::once(future::err(region_error)).boxed(),
            move |delay_duration| {
                let fut = async move {
                    futures_timer::Delay::new(delay_duration).await;
                    Ok(())
                };

                fut.map_ok(move |_| {
                    self.retry_response_stream(pd_client, region_backoff, lock_backoff)
                })
                .try_flatten_stream()
                .boxed()
            },
        )
    }

    fn on_resolve_lock_failed(
        self,
        pd_client: Arc<impl PdClient>,
        region_backoff: impl Backoff,
        mut lock_backoff: impl Backoff,
    ) -> BoxStream<'static, Result<Self::RpcResponse>> {
        lock_backoff.next_delay_duration().map_or(
            stream::once(future::err(ErrorKind::ResolveLockError.into())).boxed(),
            move |delay_duration| {
                let fut = async move {
                    futures_timer::Delay::new(delay_duration).await;
                    Ok(())
                };

                fut.map_ok(move |_| {
                    self.retry_response_stream(pd_client, region_backoff, lock_backoff)
                })
                .try_flatten_stream()
                .boxed()
            },
        )
    }

    fn store_stream<PdC: PdClient>(
        &mut self,
        pd_client: Arc<PdC>,
    ) -> BoxStream<'static, Result<(Self::KeyData, Store)>>;

    fn make_rpc_request(&self, key_data: Self::KeyData, store: &Store) -> Result<Self>;

    fn map_result(result: Self::RpcResponse) -> Self::Result;

    async fn reduce(results: BoxStream<'static, Result<Self::Result>>) -> Result<Self::Result>;

    fn request_from_store(&self, store: &Store) -> Result<Self>
    where
        Self: Default,
    {
        let mut request = Self::default();
        request.set_context(store.region.context()?);
        Ok(request)
    }
}

pub fn store_stream_for_key<KeyData, PdC>(
    key_data: KeyData,
    pd_client: Arc<PdC>,
) -> BoxStream<'static, Result<(KeyData, Store)>>
where
    KeyData: AsRef<Key> + Send + 'static,
    PdC: PdClient,
{
    pd_client
        .store_for_key(key_data.as_ref().clone())
        .map_ok(move |store| (key_data, store))
        .into_stream()
        .boxed()
}

/// Maps keys to a stream of stores. `key_data` must be sorted in increasing order.
pub fn store_stream_for_keys<KeyData, IntoKey, I, PdC>(
    key_data: I,
    pd_client: Arc<PdC>,
) -> BoxStream<'static, Result<(Vec<KeyData>, Store)>>
where
    KeyData: AsRef<Key> + Send + Sync + 'static,
    IntoKey: Into<KeyData> + 'static,
    I: IntoIterator<Item = IntoKey>,
    I::IntoIter: Send + Sync + 'static,
    PdC: PdClient,
{
    pd_client
        .clone()
        .group_keys_by_region(key_data.into_iter().map(Into::into))
        .and_then(move |(region_id, key)| {
            pd_client
                .clone()
                .store_for_id(region_id)
                .map_ok(move |store| (key, store))
        })
        .boxed()
}

pub fn store_stream_for_range<PdC: PdClient>(
    range: BoundRange,
    pd_client: Arc<PdC>,
) -> BoxStream<'static, Result<((Key, Key), Store)>> {
    pd_client
        .stores_for_range(range.clone())
        .map_ok(move |store| {
            let region_range = store.region.range();
            (bound_range(region_range, range.clone()), store)
        })
        .into_stream()
        .boxed()
}

/// The range used for the request should be the intersection of `region_range` and `range`.
fn bound_range(region_range: (Key, Key), range: BoundRange) -> (Key, Key) { let (lower, upper) = region_range; let (lower_bound, upper_bound) = range.into_keys(); let up = match (upper.is_empty(), upper_bound) { (_, None) => upper, (true, Some(ub)) => ub, (_, Some(ub)) if ub.is_empty() => upper, (_, Some(ub)) => min(upper, ub), }; (max(lower, lower_bound), up) } pub fn store_stream_for_ranges<PdC: PdClient>( ranges: Vec<BoundRange>, pd_client: Arc<PdC>, ) -> BoxStream<'static, Result<(Vec<BoundRange>, Store)>> { pd_client .clone() .group_ranges_by_region(ranges) .and_then(move |(region_id, range)| { pd_client .clone() .store_for_id(region_id) .map_ok(move |store| (range, store)) }) .into_stream() .boxed() } #[cfg(test)] mod test { use super::*; use crate::mock::{MockKvClient, MockPdClient}; use futures::executor; use grpcio::CallOption; use std::{any::Any, sync::Mutex}; use tikv_client_proto::{kvrpcpb, tikvpb::TikvClient}; #[test] fn test_region_retry() { #[derive(Clone)] struct MockRpcResponse; impl HasError for MockRpcResponse { fn error(&mut self) -> Option<Error> { unreachable!() } } impl HasRegionError for MockRpcResponse { fn region_error(&mut self) -> Option<Error> { Some(Error::region_not_found(1)) } } impl HasLocks for MockRpcResponse {} #[derive(Clone)] struct MockKvRequest { test_invoking_count: Arc<Mutex<usize>>, } #[async_trait] impl Request for MockKvRequest { async fn dispatch(&self, _: &TikvClient, _: CallOption) -> Result<Box<dyn Any>> { Ok(Box::new(MockRpcResponse {})) } fn label(&self) -> &'static str { "mock" } fn as_any(&self) -> &dyn Any { self } fn set_context(&mut self, _: kvrpcpb::Context) { unreachable!(); } } #[async_trait] impl KvRequest for MockKvRequest { type Result = (); type RpcResponse = MockRpcResponse; type KeyData = Key; fn make_rpc_request(&self, _key_data: Self::KeyData, _store: &Store) -> Result<Self> { Ok(Self { test_invoking_count: self.test_invoking_count.clone(), }) } fn map_result(_: Self::RpcResponse) -> Self::Result {} async fn reduce( _results: BoxStream<'static, Result<Self::Result>>, ) -> Result<Self::Result> { unreachable!() } fn store_stream<PdC: PdClient>( &mut self, pd_client: Arc<PdC>, ) -> BoxStream<'static, Result<(Self::KeyData, Store)>> { // Increases by 1 for each call. let mut test_invoking_count = self.test_invoking_count.lock().unwrap(); *test_invoking_count += 1; store_stream_for_key(Key::from("mock_key".to_owned()), pd_client) } } let invoking_count = Arc::new(Mutex::new(0)); let request = MockKvRequest { test_invoking_count: invoking_count.clone(), }; let pd_client = Arc::new(MockPdClient::new(MockKvClient::with_dispatch_hook( |_: &dyn Any| Ok(Box::new(MockRpcResponse) as Box<dyn Any>), ))); let region_backoff = NoJitterBackoff::new(1, 1, 3); let lock_backoff = NoJitterBackoff::new(1, 1, 3); let stream = request.retry_response_stream(pd_client, region_backoff, lock_backoff); executor::block_on(async { stream.collect::<Vec<Result<MockRpcResponse>>>().await }); // Original call plus the 3 retries assert_eq!(*invoking_count.lock().unwrap(), 4); } }
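// A small sketch of `bound_range`'s clipping behaviour (a hypothetical test,
// assuming `BoundRange: From<std::ops::Range<Key>>` as provided by the
// client's range conversions; an empty upper key stands for an unbounded end).
#[cfg(test)]
mod bound_range_sketch {
    use super::*;

    #[test]
    fn clips_range_to_region() {
        // The region covers ["b", "f") and the requested range is ["a", "d"),
        // so the intersection handed to the RPC should be ["b", "d").
        let region_range = (Key::from("b".to_owned()), Key::from("f".to_owned()));
        let range = BoundRange::from(Key::from("a".to_owned())..Key::from("d".to_owned()));

        let (lower, upper) = bound_range(region_range, range);
        assert_eq!(lower, Key::from("b".to_owned()));
        assert_eq!(upper, Key::from("d".to_owned()));
    }
}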
{ request.response_stream(pd_client, OPTIMISTIC_BACKOFF) }
mod.rs
use std::{collections::HashMap, fmt};

use crate::type_engine::resolve_type;
use crate::{
    asm_generation::expression::convert_abi_fn_to_asm,
    asm_lang::{
        allocated_ops::{AllocatedOp, AllocatedRegister},
        virtual_register::*,
        Label, Op, OrganizationalOp, RealizedOp, VirtualImmediate12, VirtualImmediate24,
        VirtualOp,
    },
    error::*,
    parse_tree::Literal,
    semantic_analysis::{
        Namespace, TypedAstNode, TypedAstNodeContent, TypedDeclaration, TypedFunctionDeclaration,
        TypedParseTree,
    },
    types::ResolvedType,
    BuildConfig, Ident, TypeInfo,
};
use either::Either;

pub(crate) mod checks;
pub(crate) mod compiler_constants;
mod declaration;
mod expression;
mod finalized_asm;
pub(crate) mod from_ir;
mod register_sequencer;
mod while_loop;

pub(crate) use declaration::*;
pub(crate) use expression::*;
pub use finalized_asm::FinalizedAsm;
pub(crate) use register_sequencer::*;
use while_loop::convert_while_loop_to_asm;

// Initially, the bytecode will have a lot of individual registers being used. Each register will
// have a new unique identifier. For example, two separate invocations of `+` will result in 4
// registers being used for arguments and 2 for outputs.
//
// After that, the level 0 bytecode will go through a process where register use is minimized,
// producing level 1 bytecode. This process is as follows:
//
// 1. Detect the last time a register is read. After that, it can be reused and recycled to fit the
//    needs of the next "level 0 bytecode" register
//
// 2. Detect needless assignments and movements, and substitute registers in.
//    i.e.
//    a = b
//    c = a
//
//    would become
//    c = b
//
//
// After the level 1 bytecode is produced, level 2 bytecode is created by limiting the maximum
// number of registers and inserting bytecode to read from/write to memory where needed. Ideally,
// the algorithm for determining which registers will be written off to memory is based on how
// frequently that register is accessed in a particular section of code. Using this strategy, we
// hope to minimize memory writing.
//
// For each line, the number of times a virtual register is accessed between then and the end of the
// program is its register precedence. A virtual register's precedence is 0 if it is currently in
// "memory", and the above described number if it is not. This prevents over-prioritization of
// registers that have already been written off to memory.
//
/// The [SwayAsmSet] contains either a contract ABI and corresponding ASM, a script's main
/// function's ASM, or a predicate's main function's ASM. ASM is never generated for libraries,
/// as that happens when the library itself is imported.
pub enum SwayAsmSet {
    ContractAbi {
        data_section: DataSection,
        program_section: AbstractInstructionSet,
    },
    ScriptMain {
        data_section: DataSection,
        program_section: AbstractInstructionSet,
    },
    PredicateMain {
        data_section: DataSection,
        program_section: AbstractInstructionSet,
    },
    // Libraries do not generate any asm.
    Library,
}

/// An [AbstractInstructionSet] is a set of instructions that use entirely virtual registers
/// and excessive moves, with the intention of later optimizing it.
#[derive(Clone)]
pub struct AbstractInstructionSet {
    ops: Vec<Op>,
}

/// "Realized" here refers to labels -- there are no more organizational
/// ops or labels. In this struct, they are all "realized" to offsets.
pub struct RealizedAbstractInstructionSet {
    ops: Vec<RealizedOp>,
}

impl RealizedAbstractInstructionSet {
    fn allocate_registers(self) -> InstructionSet {
        // Eventually, we will use a cool graph-coloring algorithm.
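        // (Such an allocator would build an interference graph whose nodes
        // are the virtual registers and whose edges connect registers that
        // are live at the same time; an assignment onto K hardware registers
        // then corresponds to a K-coloring of that graph.)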
        // For now, just keep a pool of registers and return
        // registers when they are not read anymore

        // construct a mapping from every op to the registers it uses
        let op_register_mapping = self
            .ops
            .into_iter()
            .map(|op| {
                (
                    op.clone(),
                    op.opcode.registers().into_iter().cloned().collect(),
                )
            })
            .collect::<Vec<_>>();

        // get registers from the pool.
        let mut pool = RegisterPool::init();
        let mut buf = vec![];

        for (ix, (op, _)) in op_register_mapping.iter().enumerate() {
            buf.push(AllocatedOp {
                opcode: op
                    .opcode
                    .allocate_registers(&mut pool, &op_register_mapping, ix),
                comment: op.comment.clone(),
                owning_span: op.owning_span.clone(),
            })
        }
        InstructionSet { ops: buf }
    }
}

/// An [InstructionSet] is produced by allocating registers on an [AbstractInstructionSet].
#[derive(Clone)]
pub struct InstructionSet {
    ops: Vec<AllocatedOp>,
}

type Data = Literal;

impl AbstractInstructionSet {
    /// Removes any jumps that jump to the subsequent line
    fn remove_sequential_jumps(&self) -> AbstractInstructionSet {
        let mut buf = vec![];
        // `saturating_sub` keeps the range empty when there are no ops at all.
        for i in 0..self.ops.len().saturating_sub(1) {
            if let Op {
                opcode: Either::Right(OrganizationalOp::Jump(ref label)),
                ..
            } = self.ops[i]
            {
                if let Op {
                    opcode: Either::Right(OrganizationalOp::Label(ref label2)),
                    ..
                } = self.ops[i + 1]
                {
                    if label == label2 {
                        // this is a jump to the next line
                        // omit these by doing nothing
                        continue;
                    }
                }
            }
            buf.push(self.ops[i].clone());
        }
        // the last item cannot sequentially jump by definition so we add it in here
        if let Some(x) = self.ops.last() {
            buf.push(x.clone())
        };

        // scan through the jumps and remove any labels that are unused
        // this could of course be N instead of 2N if I did this in the above for loop.
        // However, the sweep for unused labels is inevitable regardless of the above phase
        // so might as well do it here.
        let mut buf2 = vec![];
        for op in &buf {
            match op.opcode {
                Either::Right(OrganizationalOp::Label(ref label)) => {
                    if label_is_used(&buf, label) {
                        buf2.push(op.clone());
                    }
                }
                _ => buf2.push(op.clone()),
            }
        }

        AbstractInstructionSet { ops: buf2 }
    }

    /// Runs two passes -- one to get the instruction offsets of the labels
    /// and one to replace the labels in the organizational ops
    fn realize_labels(self, data_section: &DataSection) -> RealizedAbstractInstructionSet
}

#[derive(Debug)]
struct RegisterAllocationStatus {
    reg: AllocatedRegister,
    in_use: Option<VirtualRegister>,
}

#[derive(Debug)]
pub(crate) struct RegisterPool {
    registers: Vec<RegisterAllocationStatus>,
}

impl RegisterPool {
    fn init() -> Self {
        let register_pool: Vec<RegisterAllocationStatus> = (0
            // `NUM_ALLOCATABLE_REGISTERS` already stops one register short of
            // the full set: the final register is reserved for the data
            // section begin.
            ..compiler_constants::NUM_ALLOCATABLE_REGISTERS)
            .map(|x| RegisterAllocationStatus {
                reg: AllocatedRegister::Allocated(x),
                in_use: None,
            })
            .collect();
        Self {
            registers: register_pool,
        }
    }

    /// Checks if any currently used registers are no longer in use, updates the pool,
    /// and grabs an available register.
    pub(crate) fn get_register(
        &mut self,
        virtual_register: &VirtualRegister,
        op_register_mapping: &[(RealizedOp, std::collections::HashSet<VirtualRegister>)],
    ) -> Option<AllocatedRegister> {
        // check if this register has already been allocated
        if let a @ Some(_) = self.registers.iter().find_map(
            |RegisterAllocationStatus { reg, in_use }| match in_use {
                Some(x) if x == virtual_register => Some(reg),
                _ => None,
            },
        ) {
            return a.cloned();
        }

        // scan to see if any of the old ones are no longer in use
        for RegisterAllocationStatus { in_use, .. } in
            self.registers.iter_mut().filter(|r| r.in_use.is_some())
        {
            if virtual_register_is_never_accessed_again(
                in_use.as_ref().unwrap(),
                op_register_mapping,
            ) {
                *in_use = None;
            }
        }

        // find the next unused register, return it, assign it
        let next_available = self
            .registers
            .iter_mut()
            .find(|RegisterAllocationStatus { in_use, .. }| in_use.is_none());

        match next_available {
            Some(RegisterAllocationStatus { in_use, reg }) => {
                *in_use = Some(virtual_register.clone());
                Some(reg.clone())
            }
            None => None,
        }
    }
}

fn virtual_register_is_never_accessed_again(
    reg: &VirtualRegister,
    ops: &[(RealizedOp, std::collections::HashSet<VirtualRegister>)],
) -> bool {
    !ops.iter().any(|(_, regs)| regs.contains(reg))
}

/// helper function to check if a label is used in a given buffer of ops
fn label_is_used(buf: &[Op], label: &Label) -> bool {
    buf.iter().any(|Op { ref opcode, .. }| match opcode {
        Either::Right(OrganizationalOp::Jump(ref l)) if label == l => true,
        Either::Right(OrganizationalOp::JumpIfNotEq(_reg0, _reg1, ref l)) if label == l => true,
        _ => false,
    })
}

#[derive(Default, Clone, Debug)]
pub struct DataSection {
    /// the data to be put in the data section of the asm
    pub value_pairs: Vec<Data>,
}

impl DataSection {
    /// Given a [DataId], calculate the offset _from the beginning of the data section_ to the data
    /// in bytes.
    pub(crate) fn offset_to_id(&self, id: &DataId) -> usize {
        self.value_pairs
            .iter()
            .take(id.0 as usize)
            .map(|x| x.to_bytes().len())
            .sum()
    }

    pub(crate) fn serialize_to_bytes(&self) -> Vec<u8> {
        // not the exact right capacity but serves as a lower bound
        let mut buf = Vec::with_capacity(self.value_pairs.len());
        for val in &self.value_pairs {
            buf.append(&mut val.to_bytes().to_vec());
        }
        buf
    }

    /// Calculates the return type of the data held at a specific [DataId].
    pub(crate) fn type_of_data(&self, id: &DataId) -> Option<ResolvedType> {
        self.value_pairs.get(id.0 as usize).map(|x| x.as_type())
    }

    /// When generating code, sometimes a hard-coded data pointer is needed to reference
    /// static values that have a length longer than one word.
    /// This method appends pointers to the end of the data section (thus, not altering the data
    /// offsets of previous data).
    /// `pointer_value` is in _bytes_ and refers to the offset from instruction start to the data
    /// in question.
pub(crate) fn append_pointer(&mut self, pointer_value: u64) -> DataId { let pointer_as_data = Literal::new_pointer_literal(pointer_value); self.insert_data_value(&pointer_as_data) } /// Given any data in the form of a [Literal] (using this type mainly because it includes type /// information and debug spans), insert it into the data section and return its offset as a /// [DataId]. pub(crate) fn insert_data_value(&mut self, data: &Literal) -> DataId { // if there is an identical data value, use the same id match self.value_pairs.iter().position(|x| x == data) { Some(num) => DataId(num as u32), None => { self.value_pairs.push(data.clone()); // the index of the data section where the value is stored DataId((self.value_pairs.len() - 1) as u32) } } } } impl fmt::Display for DataSection { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut data_buf = String::new(); for (ix, data) in self.value_pairs.iter().enumerate() { let data_val = match data { Literal::U8(num) => format!(".u8 {:#04x}", num), Literal::U16(num) => format!(".u16 {:#04x}", num), Literal::U32(num) => format!(".u32 {:#04x}", num), Literal::U64(num) => format!(".u64 {:#04x}", num), Literal::Boolean(b) => format!(".bool {}", if *b { "0x01" } else { "0x00" }), Literal::String(st) => format!(".str \"{}\"", st.as_str()), Literal::Byte(b) => format!(".byte {:#08b}", b), Literal::B256(b) => format!( ".b256 0x{}", b.iter() .map(|x| format!("{:02x}", x)) .collect::<Vec<_>>() .join("") ), }; let data_label = DataId(ix as u32); data_buf.push_str(&format!("{} {}\n", data_label, data_val)); } write!(f, ".data:\n{}", data_buf) } } impl fmt::Display for SwayAsmSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SwayAsmSet::ScriptMain { data_section, program_section, } => write!(f, "{}\n{}", program_section, data_section), SwayAsmSet::PredicateMain { data_section, program_section, } => write!(f, "{}\n{}", program_section, data_section), SwayAsmSet::ContractAbi { data_section, program_section, } => write!(f, "{}\n{}", program_section, data_section), // Libraries do not directly generate any asm. SwayAsmSet::Library => write!(f, ""), } } } impl fmt::Display for JumpOptimizedAsmSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { JumpOptimizedAsmSet::ScriptMain { data_section, program_section, } => write!(f, "{}\n{}", program_section, data_section), JumpOptimizedAsmSet::PredicateMain { data_section, program_section, } => write!(f, "{}\n{}", program_section, data_section), JumpOptimizedAsmSet::ContractAbi { data_section, program_section, } => write!(f, "{}\n{}", program_section, data_section), // Libraries do not directly generate any asm. JumpOptimizedAsmSet::Library => write!(f, ""), } } } impl fmt::Display for RegisterAllocatedAsmSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { RegisterAllocatedAsmSet::ScriptMain { program_section, data_section, } => { write!(f, "{}\n{}", program_section, data_section) } RegisterAllocatedAsmSet::PredicateMain { program_section, data_section, } => { write!(f, "{}\n{}", program_section, data_section) } RegisterAllocatedAsmSet::ContractAbi { program_section, data_section, } => { write!(f, "{}\n{}", program_section, data_section) } // Libraries do not directly generate any asm. 
            RegisterAllocatedAsmSet::Library => write!(f, ""),
        }
    }
}

impl fmt::Display for FinalizedAsm {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            FinalizedAsm::ScriptMain {
                program_section,
                data_section,
            } => write!(f, "{}\n{}", program_section, data_section),
            FinalizedAsm::PredicateMain {
                program_section,
                data_section,
            } => write!(f, "{}\n{}", program_section, data_section),
            FinalizedAsm::ContractAbi {
                program_section,
                data_section,
            } => write!(f, "{}\n{}", program_section, data_section),
            // Libraries do not directly generate any asm.
            FinalizedAsm::Library => write!(f, ""),
        }
    }
}

impl fmt::Display for AbstractInstructionSet {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            ".program:\n{}",
            self.ops
                .iter()
                .map(|x| format!("{}", x))
                .collect::<Vec<_>>()
                .join("\n")
        )
    }
}

impl fmt::Display for InstructionSet {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            ".program:\n{}",
            self.ops
                .iter()
                .map(|x| format!("{}", x))
                .collect::<Vec<_>>()
                .join("\n")
        )
    }
}

#[derive(Default, Clone, Debug)]
pub(crate) struct AsmNamespace {
    data_section: DataSection,
    variables: HashMap<Ident, VirtualRegister>,
}

/// An address which refers to a value in the data section of the asm.
#[derive(Clone, Debug)]
pub(crate) struct DataId(pub(crate) u32);

impl fmt::Display for DataId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "data_{}", self.0)
    }
}

impl AsmNamespace {
    pub(crate) fn insert_variable(&mut self, var_name: Ident, register_location: VirtualRegister) {
        self.variables.insert(var_name, register_location);
    }

    pub(crate) fn insert_data_value(&mut self, data: &Data) -> DataId {
        self.data_section.insert_data_value(data)
    }

    /// Finds the register which contains variable `var_name`.
    /// A failed lookup is reported as an internal compiler error, because invalid
    /// variable expressions are caught in the type checking stage.
    pub(crate) fn look_up_variable(&self, var_name: &Ident) -> CompileResult<&VirtualRegister> {
        match self.variables.get(var_name) {
            Some(o) => ok(o, vec![], vec![]),
            None => err(
                vec![],
                vec![CompileError::Internal(
                    "Unknown variable in assembly generation. This should have been an error \
                     during type checking.",
                    var_name.span().clone(),
                )],
            ),
        }
    }
}

pub(crate) fn compile_ast_to_asm(
    ast: TypedParseTree,
    build_config: &BuildConfig,
) -> CompileResult<FinalizedAsm> {
    let mut register_sequencer = RegisterSequencer::new();
    let mut warnings = vec![];
    let mut errors = vec![];
    let (asm, _asm_namespace) = match ast {
        TypedParseTree::Script {
            main_function,
            namespace: ast_namespace,
            declarations,
            ..
        } => {
            let mut namespace: AsmNamespace = Default::default();
            let mut asm_buf = build_preamble(&mut register_sequencer).to_vec();
            check!(
                add_all_constant_decls(
                    &mut namespace,
                    &mut register_sequencer,
                    &mut asm_buf,
                    &declarations,
                    &ast_namespace,
                ),
                return err(warnings, errors),
                warnings,
                errors
            );
            // start generating from the main function
            let return_register = register_sequencer.next();
            let mut body = check!(
                convert_code_block_to_asm(
                    &main_function.body,
                    &mut namespace,
                    &mut register_sequencer,
                    // TODO validate that this isn't just implicit returns?
Some(&return_register), ), vec![], warnings, errors ); asm_buf.append(&mut body); asm_buf.append(&mut check!( ret_or_retd_value( &main_function, return_register, &mut register_sequencer, &mut namespace ), return err(warnings, errors), warnings, errors )); ( SwayAsmSet::ScriptMain { program_section: AbstractInstructionSet { ops: asm_buf }, data_section: namespace.data_section.clone(), }, namespace, ) } TypedParseTree::Predicate { main_function, namespace: ast_namespace, declarations, .. } => { let mut namespace: AsmNamespace = Default::default(); let mut asm_buf = build_preamble(&mut register_sequencer).to_vec(); check!( add_all_constant_decls( &mut namespace, &mut register_sequencer, &mut asm_buf, &declarations, &ast_namespace, ), return err(warnings, errors), warnings, errors ); // start generating from the main function let mut body = check!( convert_code_block_to_asm( &main_function.body, &mut namespace, &mut register_sequencer, None, ), vec![], warnings, errors ); asm_buf.append(&mut body); ( SwayAsmSet::PredicateMain { program_section: AbstractInstructionSet { ops: asm_buf }, data_section: namespace.data_section.clone(), }, namespace, ) } TypedParseTree::Contract { abi_entries, namespace: ast_namespace, declarations, .. } => { let mut namespace: AsmNamespace = Default::default(); let mut asm_buf = build_preamble(&mut register_sequencer).to_vec(); check!( add_all_constant_decls( &mut namespace, &mut register_sequencer, &mut asm_buf, &declarations, &ast_namespace, ), return err(warnings, errors), warnings, errors ); let (selectors_and_labels, mut contract_asm) = check!( compile_contract_to_selectors(abi_entries, &mut namespace, &mut register_sequencer), return err(warnings, errors), warnings, errors ); asm_buf.append(&mut build_contract_abi_switch( &mut register_sequencer, &mut namespace.data_section, selectors_and_labels, )); asm_buf.append(&mut contract_asm); ( SwayAsmSet::ContractAbi { program_section: AbstractInstructionSet { ops: asm_buf }, data_section: namespace.data_section.clone(), }, namespace, ) } TypedParseTree::Library { .. 
} => (SwayAsmSet::Library, Default::default()), }; if build_config.print_intermediate_asm { println!("{}", asm); } let finalized_asm = asm .remove_unnecessary_jumps() .allocate_registers() .optimize(); if build_config.print_finalized_asm { println!("{}", finalized_asm); } check!( crate::checks::check_invalid_opcodes(&finalized_asm), return err(warnings, errors), warnings, errors ); ok(finalized_asm, warnings, errors) } impl SwayAsmSet { pub(crate) fn remove_unnecessary_jumps(self) -> JumpOptimizedAsmSet { match self { SwayAsmSet::ScriptMain { data_section, program_section, } => JumpOptimizedAsmSet::ScriptMain { data_section, program_section: program_section.remove_sequential_jumps(), }, SwayAsmSet::PredicateMain { data_section, program_section, } => JumpOptimizedAsmSet::PredicateMain { data_section, program_section: program_section.remove_sequential_jumps(), }, SwayAsmSet::Library {} => JumpOptimizedAsmSet::Library, SwayAsmSet::ContractAbi { data_section, program_section, } => JumpOptimizedAsmSet::ContractAbi { data_section, program_section: program_section.remove_sequential_jumps(), }, } } } impl JumpOptimizedAsmSet { fn allocate_registers(self) -> RegisterAllocatedAsmSet { match self { JumpOptimizedAsmSet::Library => RegisterAllocatedAsmSet::Library, JumpOptimizedAsmSet::ScriptMain { data_section, program_section, } => { let program_section = program_section .realize_labels(&data_section) .allocate_registers(); RegisterAllocatedAsmSet::ScriptMain { data_section, program_section, } } JumpOptimizedAsmSet::PredicateMain { data_section, program_section, } => { let program_section = program_section .realize_labels(&data_section) .allocate_registers(); RegisterAllocatedAsmSet::PredicateMain { data_section, program_section, } } JumpOptimizedAsmSet::ContractAbi { program_section, data_section, } => RegisterAllocatedAsmSet::ContractAbi { program_section: program_section .realize_labels(&data_section) .allocate_registers(), data_section, }, } } } /// Represents an ASM set which has had jump labels and jumps optimized pub enum JumpOptimizedAsmSet { ContractAbi { data_section: DataSection, program_section: AbstractInstructionSet, }, ScriptMain { data_section: DataSection, program_section: AbstractInstructionSet, }, PredicateMain { data_section: DataSection, program_section: AbstractInstructionSet, }, // Libraries do not generate any asm. Library, } /// Represents an ASM set which has had registers allocated pub enum RegisterAllocatedAsmSet { ContractAbi { data_section: DataSection, program_section: InstructionSet, }, ScriptMain { data_section: DataSection, program_section: InstructionSet, }, PredicateMain { data_section: DataSection, program_section: InstructionSet, }, // Libraries do not generate any asm. 
Library, } impl RegisterAllocatedAsmSet { fn optimize(self) -> FinalizedAsm { // TODO implement this -- noop for now match self { RegisterAllocatedAsmSet::Library => FinalizedAsm::Library, RegisterAllocatedAsmSet::ScriptMain { mut program_section, data_section, } => { // ensure there's an even number of ops so the // data section offset is valid if program_section.ops.len() & 1 != 0 { program_section.ops.push(AllocatedOp { opcode: crate::asm_lang::allocated_ops::AllocatedOpcode::NOOP, comment: "word-alignment of data section".into(), owning_span: None, }); } FinalizedAsm::ScriptMain { program_section, data_section, } } RegisterAllocatedAsmSet::PredicateMain { mut program_section, data_section, } => { // ensure there's an even number of ops so the // data section offset is valid if program_section.ops.len() & 1 != 0 { program_section.ops.push(AllocatedOp { opcode: crate::asm_lang::allocated_ops::AllocatedOpcode::NOOP, comment: "word-alignment of data section".into(), owning_span: None, }); } FinalizedAsm::PredicateMain { program_section, data_section, } } RegisterAllocatedAsmSet::ContractAbi { mut program_section, data_section, } => { // ensure there's an even number of ops so the // data section offset is valid if program_section.ops.len() & 1 != 0 { program_section.ops.push(AllocatedOp { opcode: crate::asm_lang::allocated_ops::AllocatedOpcode::NOOP, comment: "word-alignment of data section".into(), owning_span: None, }); } FinalizedAsm::ContractAbi { program_section, data_section, } } } } } pub(crate) enum NodeAsmResult { JustAsm(Vec<Op>), ReturnStatement { asm: Vec<Op> }, } /// The tuple being returned here contains the opcodes of the code block and, /// optionally, a return register in case this node was a return statement fn convert_node_to_asm( node: &TypedAstNode, namespace: &mut AsmNamespace, register_sequencer: &mut RegisterSequencer, // Where to put the return value of this node, if it is needed. return_register: Option<&VirtualRegister>, ) -> CompileResult<NodeAsmResult> { let mut warnings = vec![]; let mut errors = vec![]; match &node.content { TypedAstNodeContent::WhileLoop(r#loop) => { let res = check!( convert_while_loop_to_asm(r#loop, namespace, register_sequencer), return err(warnings, errors), warnings, errors ); ok(NodeAsmResult::JustAsm(res), warnings, errors) } TypedAstNodeContent::Declaration(typed_decl) => { let res = check!( convert_decl_to_asm(typed_decl, namespace, register_sequencer), return err(warnings, errors), warnings, errors ); ok(NodeAsmResult::JustAsm(res), warnings, errors) } TypedAstNodeContent::ImplicitReturnExpression(exp) => { // if a return register was specified, we use it. If not, we generate a register but // it is going to get thrown away later (in coalescing) as it is never read let return_register = if let Some(return_register) = return_register { return_register.clone() } else { register_sequencer.next() }; let ops = check!( convert_expression_to_asm(exp, namespace, &return_register, register_sequencer), return err(warnings, errors), warnings, errors ); ok( NodeAsmResult::ReturnStatement { asm: ops }, warnings, errors, ) } TypedAstNodeContent::ReturnStatement(exp) => { // if a return register was specified, we use it. 
If not, we generate a register but // it is going to get thrown away later (in coalescing) as it is never read let return_register = if let Some(return_register) = return_register { return_register.clone() } else { register_sequencer.next() }; let ops = check!( convert_expression_to_asm( &exp.expr, namespace, &return_register, register_sequencer ), return err(warnings, errors), warnings, errors ); ok( NodeAsmResult::ReturnStatement { asm: ops }, warnings, errors, ) } TypedAstNodeContent::Expression(ref typed_expr) => { let return_register = if let Some(return_register) = return_register { return_register.clone() } else { register_sequencer.next() }; let asm = check!( convert_expression_to_asm( typed_expr, namespace, &return_register, register_sequencer ), return err(warnings, errors), warnings, errors ); ok(NodeAsmResult::JustAsm(asm), warnings, errors) } a => { println!("Unimplemented: {:?}", a); errors.push(CompileError::Unimplemented( "The ASM for this construct has not been written yet.", node.clone().span, )); err(warnings, errors) } } } /// Builds the asm preamble, which includes metadata and a jump past the metadata. /// Right now, it looks like this: /// /// WORD OP /// 1 JI program_start /// - NOOP /// 2 DATA_START (0-32) (in bytes, offset from $is) /// - DATA_START (32-64) /// 3 LW $ds $is 1 (where 1 is in words and $is is a byte address to base off of) /// - ADD $ds $ds $is /// 4 .program_start: fn build_preamble(register_sequencer: &mut RegisterSequencer) -> [Op; 6] { let label = register_sequencer.get_label(); [ // word 1 Op::jump_to_label(label.clone()), // word 1.5 Op { opcode: Either::Left(VirtualOp::NOOP), comment: "".into(), owning_span: None, }, // word 2 -- full word u64 placeholder Op { opcode: Either::Right(OrganizationalOp::DataSectionOffsetPlaceholder), comment: "data section offset".into(), owning_span: None, }, Op::unowned_jump_label_comment(label, "end of metadata"), // word 3 -- load the data offset into $ds Op { opcode: Either::Left(VirtualOp::DataSectionRegisterLoadPlaceholder), comment: "".into(), owning_span: None, }, // word 3.5 -- add $ds $ds $is Op { opcode: Either::Left(VirtualOp::ADD( VirtualRegister::Constant(ConstantRegister::DataSectionStart), VirtualRegister::Constant(ConstantRegister::DataSectionStart), VirtualRegister::Constant(ConstantRegister::InstructionStart), )), comment: "".into(), owning_span: None, }, ] } /// Builds the contract switch statement, or function selector, which takes the selector /// stored in the call frame (see https://github.com/FuelLabs/sway/issues/97#issuecomment-870150684 /// for an explanation of its location) fn build_contract_abi_switch( register_sequencer: &mut RegisterSequencer, data_section: &mut DataSection, selectors_and_labels: Vec<([u8; 4], Label)>, ) -> Vec<Op> { let input_selector_register = register_sequencer.next(); let mut asm_buf = vec![Op { opcode: Either::Right(OrganizationalOp::Comment), comment: "Begin contract ABI selector switch".into(), owning_span: None, }]; // load the selector from the call frame asm_buf.push(Op { opcode: Either::Left(VirtualOp::LW( input_selector_register.clone(), VirtualRegister::Constant(ConstantRegister::FramePointer), // see https://github.com/FuelLabs/fuel-specs/pull/193#issuecomment-876496372 // We expect the last four bytes of this word to contain the selector, and the first // four bytes to all be 0. 
VirtualImmediate12::new_unchecked(73, "constant infallible value"), )), comment: "load input function selector".into(), owning_span: None, }); for (selector, label) in selectors_and_labels { // put the selector in the data section let data_label = data_section.insert_data_value(&Literal::U32(u32::from_be_bytes(selector))); // load the data into a register for comparison let prog_selector_register = register_sequencer.next(); asm_buf.push(Op { opcode: Either::Left(VirtualOp::LWDataId( prog_selector_register.clone(), data_label, )), comment: "load fn selector for comparison".into(), owning_span: None, }); // compare with the input selector let comparison_result_register = register_sequencer.next(); asm_buf.push(Op { opcode: Either::Left(VirtualOp::EQ( comparison_result_register.clone(), input_selector_register.clone(), prog_selector_register, )), comment: "function selector comparison".into(), owning_span: None, }); // jump to the function label if the selector was equal asm_buf.push(Op { // if the comparison result is _not_ equal to 0, then it was indeed equal. opcode: Either::Right(OrganizationalOp::JumpIfNotEq( VirtualRegister::Constant(ConstantRegister::Zero), comparison_result_register, label, )), comment: "jump to selected function".into(), owning_span: None, }); } // if none of the selectors matched, then ret asm_buf.push(Op { // see https://github.com/FuelLabs/sway/issues/97#issuecomment-875674105 opcode: Either::Left(VirtualOp::RET(VirtualRegister::Constant( ConstantRegister::Zero, ))), comment: "return if no selectors matched".into(), owning_span: None, }); asm_buf } fn add_all_constant_decls( namespace: &mut AsmNamespace, register_sequencer: &mut RegisterSequencer, asm_buf: &mut Vec<Op>, declarations: &[TypedDeclaration], ast_namespace: &Namespace, ) -> CompileResult<()> { let mut warnings = vec![]; let mut errors = vec![]; check!( add_global_constant_decls(namespace, register_sequencer, asm_buf, declarations), return err(warnings, errors), warnings, errors ); check!( add_module_constant_decls(namespace, register_sequencer, asm_buf, ast_namespace), return err(warnings, errors), warnings, errors ); ok((), warnings, errors) } fn add_global_constant_decls( namespace: &mut AsmNamespace, register_sequencer: &mut RegisterSequencer, asm_buf: &mut Vec<Op>, declarations: &[TypedDeclaration], ) -> CompileResult<()> { let mut warnings = vec![]; let mut errors = vec![]; for declaration in declarations { if let TypedDeclaration::ConstantDeclaration(decl) = declaration { let mut ops = check!( convert_constant_decl_to_asm(decl, namespace, register_sequencer), return err(warnings, errors), warnings, errors ); asm_buf.append(&mut ops); } } ok((), warnings, errors) } fn add_module_constant_decls( namespace: &mut AsmNamespace, register_sequencer: &mut RegisterSequencer, asm_buf: &mut Vec<Op>, ast_namespace: &Namespace, ) -> CompileResult<()> { let mut warnings = vec![]; let mut errors = vec![]; // NOTE: this is currently flattening out the entire namespace, which is problematic. To fix // it we need to support hierarchical names (or at least absolute normalised names) to // AsmNamespace. This can be done in the new ASM generator which translates from IR, coming // soon. 
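    // Walk each imported module, emit its constant declarations, and then
    // recurse into that module's own imports so nested namespaces are
    // covered as well.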
for ns in ast_namespace.get_all_imported_modules() { for decl in ns.get_all_declared_symbols() { if let TypedDeclaration::ConstantDeclaration(decl) = decl { let mut ops = check!( convert_constant_decl_to_asm(decl, namespace, register_sequencer), return err(warnings, errors), warnings, errors ); asm_buf.append(&mut ops); } } check!( add_module_constant_decls(namespace, register_sequencer, asm_buf, ns), return err(warnings, errors), warnings, errors ); } ok((), warnings, errors) } /// The function selector value and corresponding label. type JumpDestination = Vec<([u8; 4], Label)>; /// A vector of opcodes representing the body of a contract ABI function. type AbiFunctionOpcodeBuffer = Vec<Op>; /// The function selector information and compiled body of a contract ABI function. type SerializedAbiFunction = (JumpDestination, AbiFunctionOpcodeBuffer); /// Given a contract's abi entries, compile them to jump destinations and an opcode buffer. fn compile_contract_to_selectors( abi_entries: Vec<TypedFunctionDeclaration>, namespace: &mut AsmNamespace, register_sequencer: &mut RegisterSequencer, ) -> CompileResult<SerializedAbiFunction> { let mut warnings = vec![]; let mut errors = vec![]; // for every ABI function, we need: // 0) a jump label // 1) loading the argument from the call frame into the register for the function // 2) the function's bytecode itself // 3) the function selector let mut selectors_labels_buf = vec![]; let mut asm_buf = vec![]; for decl in abi_entries { // TODO wrapping things in a struct should be doable by the compiler eventually, // allowing users to pass in any number of free-floating parameters (bound by immediate limits maybe). // https://github.com/FuelLabs/sway/pull/115#discussion_r666466414 if decl.parameters.len() != 4 { errors.push(CompileError::InvalidNumberOfAbiParams { span: decl.parameters_span(), }); continue; } // there are currently four parameters to every ABI function, and they are required to be // in this order let cgas_name = decl.parameters[0].name.clone(); let bal_name = decl.parameters[1].name.clone(); let coin_color_name = decl.parameters[2].name.clone(); let user_argument_name = decl.parameters[3].name.clone(); // the function selector is the first four bytes of the hashed declaration/params according // to https://github.com/FuelLabs/sway/issues/96 let selector = check!(decl.to_fn_selector_value(), [0u8; 4], warnings, errors); let fn_label = register_sequencer.get_label(); asm_buf.push(Op::jump_label(fn_label.clone(), decl.span.clone())); // load the call frame argument into the function argument register let user_argument_register = register_sequencer.next(); let cgas_register = register_sequencer.next(); let bal_register = register_sequencer.next(); let coin_color_register = register_sequencer.next(); asm_buf.push(load_user_argument(user_argument_register.clone())); asm_buf.push(load_cgas(cgas_register.clone())); asm_buf.push(load_bal(bal_register.clone())); asm_buf.push(load_coin_color(coin_color_register.clone())); asm_buf.append(&mut check!( convert_abi_fn_to_asm( &decl, (user_argument_name, user_argument_register), (cgas_name, cgas_register), (bal_name, bal_register), (coin_color_name, coin_color_register), namespace, register_sequencer ), vec![], warnings, errors )); selectors_labels_buf.push((selector, fn_label)); } ok((selectors_labels_buf, asm_buf), warnings, errors) } /// Given a register, load the user-provided argument into it fn load_user_argument(return_register: VirtualRegister) -> Op { Op { opcode: Either::Left(VirtualOp::LW( 
return_register, VirtualRegister::Constant(ConstantRegister::FramePointer), // see https://github.com/FuelLabs/fuel-specs/pull/193#issuecomment-876496372 VirtualImmediate12::new_unchecked(74, "infallible constant 74"), )), comment: "loading argument into abi function".into(), owning_span: None, } } /// Given a register, load the current value of $cgas into it fn load_cgas(return_register: VirtualRegister) -> Op { Op { opcode: Either::Left(VirtualOp::LW( return_register, VirtualRegister::Constant(ConstantRegister::ContextGas), VirtualImmediate12::new_unchecked(0, "infallible constant 0"), )), comment: "loading cgas into abi function".into(), owning_span: None, } } /// Given a register, load the current value of $bal into it fn load_bal(return_register: VirtualRegister) -> Op { Op { opcode: Either::Left(VirtualOp::LW( return_register, VirtualRegister::Constant(ConstantRegister::Balance), VirtualImmediate12::new_unchecked(0, "infallible constant 0"), )), comment: "loading coin balance into abi function".into(), owning_span: None, } } /// Given a register, load a pointer to the current coin color into it fn load_coin_color(return_register: VirtualRegister) -> Op { Op { opcode: Either::Left(VirtualOp::LW( return_register, VirtualRegister::Constant(ConstantRegister::FramePointer), VirtualImmediate12::new_unchecked(5, "infallible constant 5"), )), comment: "loading coin color into abi function".into(), owning_span: None, } } /// Given a [TypedFunctionDeclaration] and a `return_register`, return /// the return value of the function using either a `RET` or a `RETD` opcode. fn ret_or_retd_value( func: &TypedFunctionDeclaration, return_register: VirtualRegister, register_sequencer: &mut RegisterSequencer, namespace: &mut AsmNamespace, ) -> CompileResult<Vec<Op>> { let mut errors = vec![]; let warnings = vec![]; let mut asm_buf = vec![]; let main_func_ret_ty: TypeInfo = match resolve_type(func.return_type, &func.return_type_span) { Ok(o) => o, Err(e) => { errors.push(e.into()); return err(warnings, errors); } }; if main_func_ret_ty.is_unit() { // unit returns should always be zero, although because they can be // omitted from functions, the register is sometimes uninitialized. // Manually return zero in this case. 
return ok( vec![Op { opcode: Either::Left(VirtualOp::RET(VirtualRegister::Constant( ConstantRegister::Zero, ))), owning_span: Some(func.return_type_span.clone()), comment: format!("fn {} returns unit", func.name.as_str()), }], warnings, errors, ); } let span = sway_types::span::Span { span: pest::Span::new("TODO(static span)".into(), 0, 0).unwrap(), path: None, }; let size_of_main_func_return_bytes = main_func_ret_ty.size_in_words(&span).expect( "TODO(static span): Internal error: Static spans will allow for a proper error here.", ) * 8; if size_of_main_func_return_bytes <= 8 { asm_buf.push(Op { owning_span: None, opcode: Either::Left(VirtualOp::RET(return_register)), comment: format!("{} fn return value", func.name.as_str()), }); } else { // if the type is larger than one word, then we use RETD to return data // RB is the size_in_bytes let rb_register = register_sequencer.next(); let size_bytes = namespace.insert_data_value(&Literal::U64(size_of_main_func_return_bytes)); // `return_register` is $rA asm_buf.push(Op { opcode: Either::Left(VirtualOp::LWDataId(rb_register.clone(), size_bytes)), owning_span: Some(func.return_type_span.clone()), comment: "loading rB for RETD".into(), }); // now $rB has the size of the type in bytes asm_buf.push(Op { owning_span: None, opcode: Either::Left(VirtualOp::RETD(return_register, rb_register)), comment: format!("{} fn return value", func.name.as_str()), }); } ok(asm_buf, warnings, errors) }
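// A minimal sketch of the rule that ret_or_retd_value above implements,
// assuming only the VM's 8-byte word size. The enum and function here are
// illustrative stand-ins, not compiler internals:

#[derive(Debug, PartialEq)]
enum ReturnStrategy {
    /// Fits in a register: returned directly with RET.
    Ret,
    /// Larger than one word: returned by pointer plus byte length with RETD.
    RetD { size_in_bytes: u64 },
}

fn choose_return_strategy(size_in_words: u64) -> ReturnStrategy {
    let size_in_bytes = size_in_words * 8;
    if size_in_bytes <= 8 {
        ReturnStrategy::Ret
    } else {
        ReturnStrategy::RetD { size_in_bytes }
    }
}

// e.g. a one-word u64 takes the RET path, while a four-word 32-byte value
// takes the RETD path with size_in_bytes == 32.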
{ let mut label_namespace: HashMap<&Label, u64> = Default::default(); let mut counter = 0; for op in &self.ops { match op.opcode { Either::Right(OrganizationalOp::Label(ref lab)) => { label_namespace.insert(lab, counter); } // A special case for LWDataId which may be 1 or 2 ops, depending on the source size. Either::Left(VirtualOp::LWDataId(_, ref data_id)) => { let type_of_data = data_section.type_of_data(data_id).expect( "Internal miscalculation in data section -- data id did not match up to any actual data", ); counter += if type_of_data.stack_size_of() > 1 { 2 } else { 1 }; } // these ops will end up being exactly one op, so the counter goes up one Either::Right(OrganizationalOp::Jump(..)) | Either::Right(OrganizationalOp::JumpIfNotEq(..)) | Either::Left(_) => { counter += 1; } Either::Right(OrganizationalOp::Comment) => (), Either::Right(OrganizationalOp::DataSectionOffsetPlaceholder) => { // If the placeholder is 32 bits, this is 1. if 64, this should be 2. We use LW // to load the data, which loads a whole word, so for now this is 2. counter += 2 } } } let mut realized_ops = vec![]; for Op { opcode, owning_span, comment, } in self.ops.clone().into_iter() { match opcode { Either::Left(op) => realized_ops.push(RealizedOp { opcode: op, owning_span, comment, }), Either::Right(org_op) => match org_op { OrganizationalOp::Jump(ref lab) => { let offset = label_namespace.get(lab).unwrap(); let imm = VirtualImmediate24::new_unchecked( *offset, "Programs with more than 2^24 labels are unsupported right now", ); realized_ops.push(RealizedOp { opcode: VirtualOp::JI(imm), owning_span, comment, }); } OrganizationalOp::JumpIfNotEq(r1, r2, ref lab) => { let offset = label_namespace.get(lab).unwrap(); let imm = VirtualImmediate12::new_unchecked( *offset, "Programs with more than 2^12 labels are unsupported right now", ); realized_ops.push(RealizedOp { opcode: VirtualOp::JNEI(r1, r2, imm), owning_span, comment, }); } OrganizationalOp::DataSectionOffsetPlaceholder => { realized_ops.push(RealizedOp { opcode: VirtualOp::DataSectionOffsetPlaceholder, owning_span: None, comment: String::new(), }); } OrganizationalOp::Comment => continue, OrganizationalOp::Label(..) => continue, }, }; } RealizedAbstractInstructionSet { ops: realized_ops } }
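// The pass above is a classic two-pass label resolution: pass one maps each
// label to its word offset without emitting it, pass two rewrites jumps
// against that map. A stripped-down sketch with hypothetical MiniOp types,
// assuming every real instruction is exactly one word wide (the LWDataId and
// placeholder special cases are dropped for brevity):

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum MiniOp {
    Label(String), // zero-width marker, emitted as nothing
    Jump(String),  // rewritten into JumpTo(offset)
    JumpTo(u64),
    Other,         // any ordinary one-word instruction
}

fn resolve_mini_labels(ops: Vec<MiniOp>) -> Vec<MiniOp> {
    // pass 1: record the word offset of every label
    let mut offsets: HashMap<String, u64> = HashMap::new();
    let mut counter = 0u64;
    for op in &ops {
        match op {
            MiniOp::Label(name) => {
                offsets.insert(name.clone(), counter);
            }
            _ => counter += 1,
        }
    }
    // pass 2: rewrite jumps against the recorded offsets, drop the markers
    ops.into_iter()
        .filter_map(|op| match op {
            MiniOp::Label(_) => None,
            MiniOp::Jump(name) => Some(MiniOp::JumpTo(offsets[&name])),
            other => Some(other),
        })
        .collect()
}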
chat.js
/*!
 * Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * These values are all defined/updated at runtime to reflect the current session.
 */
let sessionId = null;
let nextEventsLink = null;
let chatContent = null;

/**
 * This function is called when a new session should be started. This normally happens when the page is loaded.
 */
let startNewSession = function () {
    let successHandler = () => {
        chatContent.innerHTML = "";
        setTimeout(pollForEvents, 2000);
    };
    createSession(successHandler);
};

/**
 * This function polls for events based on the nextEventsLink.
 */
let pollForEvents = function () {
    if (!botId || !revisionId || !sessionId || !nextEventsLink) {
        return;
    }

    checkToken().then(() => {
        fetch(baseURL + nextEventsLink, {
            method: "GET",
            headers: {
                "Authorization": authToken
            }
        }).then(function (response) {
            return response.json();
        })
            .then((response) => {
                response.links.forEach(link => {
                    if (link.rel === 'next') {
                        nextEventsLink = link.href;
                    }
                });

                response.items.forEach(event => {
                    // this sample only handles message events but there are other types of events (typing, transfer, end chat)
                    // that might need to be handled by a production connector
                    if (event.type === "messageEvent") {
                        createMessageElements(event);
                        chatContent.scrollTop = chatContent.scrollHeight;
                    }
                });

                setTimeout(pollForEvents, 2000);
            }, (error) => {
                console.error(error);
            });
    });
};

let createMessageElements = function (messageEvent) {
    let messageStyle = messageEvent.sourceId === userId ? "userInput" : "botResponse";

    if (messageEvent.bodyElements && messageEvent.bodyElements.length) {
        messageEvent.bodyElements.forEach(bodyElement => {
            switch (bodyElement.type) {
                case "textElement":
                    chatContent.innerHTML += "<div class='chatRow text " + messageStyle + "'><span>" + bodyElement.text + "</span></div>";
                    break;
                case "htmlElement":
                    chatContent.innerHTML += "<div class='chatRow html " + messageStyle + "'>" + bodyElement.text + "</div>";
                    break;
            }
        });
    } else if (messageEvent.text) {
        chatContent.innerHTML += "<div class='chatRow text " + messageStyle + "'><span>" + messageEvent.text + "</span></div>";
    }

    if (messageEvent.attachments && messageEvent.attachments.length) {
        let attachmentIndex = 0;
        messageEvent.attachments.forEach(attachment => {
            if (attachment.mediaType.startsWith("image/")) {
                let parentDivId = messageEvent.id + "_" + attachmentIndex++;
                chatContent.innerHTML += "<div class='chatRow " + messageStyle + "' id='" + parentDivId + "'></div>";
                getImageContent(attachment.uri, attachment.mediaType, parentDivId);
            }
        });
    }

    if (messageEvent.messageLinks && messageEvent.messageLinks.length) {
        messageEvent.messageLinks.forEach(link => {
            chatContent.innerHTML += "<div class='chatRow " + messageStyle + "'><a href='" + link.uri + "' target='_blank'>" + link.label + "</a></div>";
        });
    }

    if (messageEvent.buttons && messageEvent.buttons.length) {
        let buttonDivContents = "";
        let buttonIndex = 0;
        messageEvent.buttons.forEach(button => {
            let buttonId = messageEvent.id+"_"+buttonIndex++;
            if(button.eventText==="Snap / Upload pic"){
            //    buttonDivContents += "<input type='button' class= 'btn-primary' id='" + buttonId + "' onclick='uploadFile()'>"+'<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-camera" viewBox="0 0 16 16"> <path d="M15 12a1 1 0 0 1-1 1H2a1 1 0 0 1-1-1V6a1 1 0 0 1 1-1h1.172a3 3 0 0 0 2.12-.879l.83-.828A1 1 0 0 1 6.827 3h2.344a1 1 0 0 1 .707.293l.828.828A3 3 0 0 0 12.828 5H14a1 1 0 0 1 1 1v6zM2 4a2 2 0 0 0-2 2v6a2 2 0 0 0 2
2h12a2 2 0 0 0 2-2V6a2 2 0 0 0-2-2h-1.172a2 2 0 0 1-1.414-.586l-.828-.828A2 2 0 0 0 9.172 2H6.828a2 2 0 0 0-1.414.586l-.828.828A2 2 0 0 1 3.172 4H2z"></path> <path d="M8 11a2.5 2.5 0 1 1 0-5 2.5 2.5 0 0 1 0 5zm0 1a3.5 3.5 0 1 0 0-7 3.5 3.5 0 0 0 0 7zM3 6.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"></path> </svg> </button>';
                buttonDivContents += '<button type="button" class="btn btn-primary" id="'+buttonId+'" onclick="uploadFile()" > <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-camera" viewBox="0 0 16 16"> <path d="M15 12a1 1 0 0 1-1 1H2a1 1 0 0 1-1-1V6a1 1 0 0 1 1-1h1.172a3 3 0 0 0 2.12-.879l.83-.828A1 1 0 0 1 6.827 3h2.344a1 1 0 0 1 .707.293l.828.828A3 3 0 0 0 12.828 5H14a1 1 0 0 1 1 1v6zM2 4a2 2 0 0 0-2 2v6a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V6a2 2 0 0 0-2-2h-1.172a2 2 0 0 1-1.414-.586l-.828-.828A2 2 0 0 0 9.172 2H6.828a2 2 0 0 0-1.414.586l-.828.828A2 2 0 0 1 3.172 4H2z"></path> <path d="M8 11a2.5 2.5 0 1 1 0-5 2.5 2.5 0 0 1 0 5zm0 1a3.5 3.5 0 1 0 0-7 3.5 3.5 0 0 0 0 7zM3 6.5a.5.5 0 1 1-1 0 .5.5 0 0 1 1 0z"></path> </svg> Snap / Upload Pic </button>';
            } else {
                buttonDivContents += "<input type='button' class= 'btn-primary' id='" + buttonId + "' value='" + button.eventText + "' onclick='sendUserInputEvent(\""+button.eventText+"\")'/>";
            };
        });
        chatContent.innerHTML += "<div class='chatButtons'>"+buttonDivContents+"</div>";
    }
};

/**
 * Images returned by the bot have relative URIs that require the same authorization as all other bot calls.
 * This function loads the image using the appropriate authorization header and then updates the imageDiv with the
 * image content.
 */
let getImageContent = function (uri, mediaType, parentDivId) {
    checkToken().then(() => {
        fetch(baseURL + uri, {
            method: "GET",
            headers: {
                "Authorization": authToken,
                'Access-Control-Allow-Origin':"*",
                "Content-Type": mediaType
            }
        })
            .then((response) => {
                return response.text();
            })
            .then((data) => {
                let imageDiv = document.getElementById(parentDivId);
                imageDiv.innerHTML = data;
                chatContent.scrollTop = chatContent.scrollHeight;
            });
    });
};

/**
 * This function sends a user input event to the bot.
 */
let sendUserInputEvent = function (userInput) {
    let event = {};
    event.sourceId = userId;
    event.sourceName = userName;
    event.type = "messageEvent";
    event.text = userInput;

    sendChatEvent(event, 'application/vnd.sas.natural.language.conversations.create.message.event+json', null);
};

/**
 * This function checks if an authorization token is available.
 *
 * NOTE: If this connector is meant to be long running then additional code needs to be added to handle
 * expired tokens.
 *
 * @returns {Promise}
 */
let checkToken = function () {
    console.log("entered checkToken");
    //check if the authorization token is available
    if (authToken) {
        return Promise.resolve();
    } else {
        console.error("Authentication token not provided.");
        // without a rejected promise here, callers chaining .then() onto
        // checkToken() would crash on an undefined return value
        return Promise.reject(new Error("Authentication token not provided."));
    }
};

/**
 * This function creates a new chat session and calls the successHandler when it is done.
* * @param successHandler - function to call when the session has been created */ let createSession = function (successHandler) { console.log("Entered create session"); sessionId = null; nextEventsLink = null; checkToken().then(() => { console.log("token checked"); // this information is stored with the session and used to identify where/how the session was created let data = { "connectorName": connectorName, "properties": { "userName": userName, "userId": userId } }; fetch(baseURL + '/naturalLanguageConversations/bots/' + botId + '/revisions/' + revisionId + '/sessions', { method: "POST", body: JSON.stringify(data), headers: { "Authorization": authToken, 'Access-Control-Allow-Origin':"*", "Content-Type": "application/json" } }).then(function (response) { return response.json(); }) .then((response) => { sessionId = response.id; nextEventsLink = '/naturalLanguageConversations/bots/' + botId + '/revisions/' + revisionId + '/sessions/' + sessionId + "/events"; sendStartChatEvent(successHandler); }, (error) => { console.error(error); }); }); }; /** * This function sends a Start Chat Event. * * @param successHandler - function to call when the event has been sent */ let sendStartChatEvent = function (successHandler) { let event = {}; event.type = "startChatEvent"; event.sourceId = userId; event.sourceName = userName; sendChatEvent(event, 'application/vnd.sas.natural.language.conversations.create.start.chat.event+json', successHandler); }; /** * This function sends an event of any type to the bot. * * @param event - event to send to the bot * @param eventContentType - the event type * @param successHandler - (optional) function to call when the session has been created */ let sendChatEvent = function (event, eventContentType, successHandler) { checkToken().then(() => { fetch(baseURL + '/naturalLanguageConversations/bots/' + botId + '/revisions/' + revisionId + '/sessions/' + sessionId + "/events", { method: "POST", body: JSON.stringify(event),
} }).then(() => { if (successHandler) successHandler(); }, (error) => { console.error(error); }); }); }; /** * This function runs when the page loads. It sets up listeners and starts the chat session. */ function chatSetup() { chatContent = document.getElementById('chatContent'); let userInputField = document.getElementById("userInputField"); let sendButton = document.getElementById('sendButton'); let submitUserInput = () => { let userInput = userInputField.value; userInputField.value = ""; sendUserInputEvent(userInput); }; userInputField.addEventListener('keypress', function (e) { if (e.key === 'Enter') { submitUserInput(); } }); sendButton.addEventListener('click', function (e) { submitUserInput(); }); // start a new chat session startNewSession(); };
headers: { "Authorization": authToken, "Content-Type": eventContentType
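/*
 * A hedged sketch of the expired-token handling that the NOTE in checkToken
 * above calls out as missing. Everything here is illustrative: refreshToken()
 * is a stand-in for whatever identity flow actually issues tokens, and
 * tokenExpiresAt is a hypothetical bookkeeping variable.
 */
let tokenExpiresAt = 0; // hypothetical; would be set whenever a token is issued

let refreshToken = function () {
    // placeholder implementation so the sketch is self-contained; a real
    // connector would call its auth provider here instead
    return Promise.resolve({ value: "fake-token", expiresInSeconds: 3600 });
};

let checkTokenWithRefresh = function () {
    if (authToken && Date.now() < tokenExpiresAt) {
        return Promise.resolve();
    }
    return refreshToken().then((newToken) => {
        authToken = newToken.value;
        tokenExpiresAt = Date.now() + newToken.expiresInSeconds * 1000;
    });
};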
doc.go
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go_gapic. DO NOT EDIT. // Package secretmanager is an auto-generated package for the // Secret Manager API. // // Stores sensitive data such as API keys, passwords, and certificates. // Provides convenience while improving security. // // Use of Context // // The ctx passed to NewClient is used for authentication requests and // for creating the underlying connection, but is not used for subsequent calls. // Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. // // For information about setting deadlines, reusing contexts, and more // please visit pkg.go.dev/cloud.google.com/go. package secretmanager // import "cloud.google.com/go/secretmanager/apiv1" import ( "context" "os" "runtime" "strconv" "strings" "unicode" "google.golang.org/api/option" "google.golang.org/grpc/metadata" ) // For more information on implementing a client constructor hook, see // https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. type clientHookParams struct{} type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) const versionClient = "20201117" func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) out = out.Copy() for _, md := range mds { for k, v := range md { out[k] = append(out[k], v...) } } return metadata.NewOutgoingContext(ctx, out) }
} b, err := strconv.ParseBool(raw) return b, err } // DefaultAuthScopes reports the default set of authentication scopes to use with this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", } } // versionGo returns the Go runtime version. The returned string // has no whitespace, suitable for reporting in header. func versionGo() string { const develPrefix = "devel +" s := runtime.Version() if strings.HasPrefix(s, develPrefix) { s = s[len(develPrefix):] if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { s = s[:p] } return s } notSemverRune := func(r rune) bool { return !strings.ContainsRune("0123456789.", r) } if strings.HasPrefix(s, "go1") { s = s[2:] var prerelease string if p := strings.IndexFunc(s, notSemverRune); p >= 0 { s, prerelease = s[:p], s[p:] } if strings.HasSuffix(s, ".") { s += "0" } else if strings.Count(s, ".") < 2 { s += ".0" } if prerelease != "" { s += "-" + prerelease } return s } return "UNKNOWN" }
func checkDisableDeadlines() (bool, error) { raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") if !ok { return false, nil
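// A worked illustration (not generated code) of the normalization performed
// by versionGo above; each pair follows directly from its branches:
//
//   runtime.Version()   ->  versionGo()
//   "go1.15"            ->  "1.15.0"         (missing patch padded with ".0")
//   "go1.15.3"          ->  "1.15.3"         (already three components)
//   "go1.16beta1"       ->  "1.16.0-beta1"   (prerelease split off, re-appended)
//   "devel +abc123 ..." ->  "abc123"         (devel prefix stripped, first token kept)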
GithubAsciiArtApp.go
package main import ( "../reader" "../gitclient" "flag" "github.com/libgit2/git2go" "os/user" "strings" "time" "fmt" ) func main() { modelFile, outputRepoPath, gitConfigPath, year, weekOffset := setupArgs() fmt.Printf("Read commit data from [%s]\n", modelFile) commitDataChan, done, err := reader.ReadCommitDataFromFileToChannel(modelFile) checkError(err) fmt.Printf("Create/re-use repo [%s]\n", outputRepoPath) repoClient, err := gitclient.New(outputRepoPath, readGitConfig(gitConfigPath))
	fmt.Printf("Calculating commits for year [%d] with offset [%d]\n", year, weekOffset)

	for {
		select {
		case commitData := <-commitDataChan:
			commitTimes := CalculateCommitTimes(commitData, year, weekOffset)
			commitDataAtTimes(repoClient, commitTimes...)
		case <- done:
			fmt.Printf("Finished committing to [%s]\n", outputRepoPath)
			return
		}
	}
}

func setupArgs() (string, string, string, int, int) {
	inputModel := flag.String("input-model", "./models/mario_head.txt", "File with commit model")
	outputRepoPath := flag.String("output-repo", "./output_repo", "Output repo path")
	gitConfigPath := flag.String("git-config", "~/.gitconfig", "Git config file")
	year := flag.Int("year", 2015, "Year of commit messages")
	weekOffset := flag.Int("offset", 0, "Offset of the image from the beginning of the year (in weeks)")
	flag.Parse()

	return *inputModel, *outputRepoPath, *gitConfigPath, *year, *weekOffset
}

func checkError(err error) {
	if err != nil {
		panic(err)
	}
}

func readGitConfig(configPath string) *git.Config {
	usr, err := user.Current()
	checkError(err)

	pathWithReplacedTilda := strings.Replace(configPath, "~", usr.HomeDir, 1)
	fmt.Printf("Read git config from [%s]\n", pathWithReplacedTilda)

	config, err := git.OpenOndisk(nil, pathWithReplacedTilda)
	checkError(err)

	return config
}

func CalculateCommitTimes(commitData reader.CommitData, year int, weekOffset int) []time.Time {
	timeArray := make([]time.Time, commitData.NumberOfCommits)
	for i := 0; i < len(timeArray); i++ {
		timeArray[i] = CalculateCommitTime(commitData, year, weekOffset).Add(time.Duration(i) * time.Minute)
	}
	return timeArray
}

func CalculateCommitTime(commitData reader.CommitData, year int, weekOffset int) time.Time {
	firstSundayOfYearAtNoon := FirstSunday(year).Add(12 * time.Hour)
	shiftedBeginningOfYear := firstSundayOfYearAtNoon.AddDate(0, 0, weekOffset * 7)
	return shiftedBeginningOfYear.AddDate(0, 0, commitData.DayOfWeek + (7 * commitData.WeekOfYear))
}

func FirstSunday(year int) time.Time {
	beginningOfYear := time.Date(year, 1, 1, 0, 0, 0, 0, time.UTC)
	daysToLastSunday := int(time.Sunday) - int(beginningOfYear.Weekday())
	return beginningOfYear.AddDate(0, 0, daysToLastSunday)
}

func commitDataAtTimes(gc *gitclient.GitClient, commitTimes ...time.Time) {
	for i := range commitTimes {
		gc.CreateCommitAtDate(commitTimes[i], "Auto-generated commit")
	}
}
checkError(err)
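// A worked example (illustrative, not part of the app) of the date math in
// CalculateCommitTime/FirstSunday above: Jan 1 2015 is a Thursday
// (Weekday() == 4), so FirstSunday(2015) steps 0 - 4 = -4 days back to
// Sunday, Dec 28 2014. With weekOffset == 0, a cell with WeekOfYear == 2 and
// DayOfWeek == 3 lands at noon on Dec 28 2014 + (3 + 7*2) days = Jan 14 2015,
// and its NumberOfCommits commits are then spaced one minute apart from there.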
main.rs
fn main() { let s = String::from("hello"); takes_ownership(s); let _x = 5; makes_copy(5); let s = gives_ownership(); let s1 = takes_and_gives_back(s); let (s2, _len) = calculate_length(s1); let _len = calculate_length1(&s2); let mut s3 = String::from("Hello"); change(&mut s3); { let _r1 = &mut s3; } let r2 = &mut s3; println!("{}", r2); } fn takes_ownership(some_string: String) { println!("{}",some_string); } fn makes_copy(some_integer: i32) { println!("{}", some_integer); } fn gives_ownership() -> String { let some_string = String::from("hello"); some_string } fn takes_and_gives_back(a_string: String) -> String
fn calculate_length(s: String) -> (String, usize) { let len = s.len(); (s, len) } fn calculate_length1(s: &String) -> usize { s.len() } fn change(a_string: &mut String) { a_string.push_str(" world"); }
{ println!("get String {}", a_string); a_string }
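// An extra illustrative example in the same spirit as the functions above
// (not from the original file): a returned reference stays tied to the
// borrow it came from, so the compiler enforces that the owner outlives it.

fn first_word(s: &String) -> &str {
    // borrows from `s`; the returned slice may not outlive that borrow
    s.split_whitespace().next().unwrap_or("")
}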
sample.py
# -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # Matt Clarke <[email protected]> # # ***************************************************************************** from nicos.core import Param from nicos.devices.sample import Sample class EssSample(Sample): """Device that collects the various sample properties specific to samples at ESS. """ parameters = { 'sample_formula': Param('formula', type=str, settable=True, category='sample'), 'number_of': Param('number_of', type=int, settable=True, category='sample'), 'mass_volume': Param('mass/volume', type=str, settable=True, category='sample'), 'density': Param('density', type=str, settable=True, category='sample'), }
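# A hedged usage sketch (not part of this module): NICOS exposes each entry in
# ``parameters`` as a settable attribute on the configured device, so with a
# device configured under the (illustrative) name ``Sample`` a session could do:
#
#     Sample.sample_formula = 'C60'
#     Sample.number_of = 2
#     Sample.mass_volume = '1.2 g'
#     Sample.density = '1.65 g/cm^3'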
107.63ebe1b0.js
(window.webpackJsonp=window.webpackJsonp||[]).push([[107],{211:function(n,s,a){"use strict";a.r(s);var r=a(0),t=Object(r.a)({},function(){var n=this,s=n.$createElement,a=n._self._c||s;return a("ContentSlotsDistributor",{attrs:{"slot-key":n.$parent.slotKey}},[a("h1",{attrs:{id:"_4-1-9-1-基数排序"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-9-1-基数排序","aria-hidden":"true"}},[n._v("#")]),n._v(" 4.1.9.1 基数排序")]),n._v(" "),a("blockquote",[a("p",[n._v("将所有待比较数值(正整数)统一为同样的数字长度,数字较短的数前面补零。"),a("br"),n._v("\n然后,从最低位开始,依次进行一次排序。"),a("br"),n._v("\n这样从最低位排序一直到最高位排序完成以后,数列就变成一个有序序列。")])]),n._v(" "),a("p",[n._v("分类\t排序算法"),a("br"),n._v("\n数据结构\t数组"),a("br"),n._v("\n最坏时间复杂度\tO(kN)"),a("br"),n._v("\n最坏空间复杂度\tO(k+N)")]),n._v(" "),a("p",[n._v("这三种排序算法都利用了桶的概念,但对桶的使用方法上有明显差异:"),a("br"),n._v("\n1.基数排序:根据键值的每位数字来分配桶"),a("br"),n._v("\n2.计数排序:每个桶只存储单一键值"),a("br"),n._v("\n3.桶排序:每个桶存储一定范围的数值")]),n._v(" "),a("div",{staticClass:"language- line-numbers-mode"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[n._v("Array.prototype.radixSort = function() {\n let arr = this.slice(0)\n const max = Math.max(...arr)\n let digit = `${max}`.length\n let start = 1\n let buckets = []\n\n while(digit > 0) {\n start *= 10\n for(let i = 0; i < arr.length; i++) {\n const index = arr[i] % start\n !buckets[index] && (buckets[index] = [])\n buckets[index].push(arr[i])\n }\n arr = []\n for(let i = 0; i < buckets.length; i++) {\n buckets[i] && (arr = arr.concat(buckets[i]))\n }\n buckets = []\n digit --\n }\n return arr\n}\nconst arr = [1, 10, 100, 1000, 98, 67, 3, 28, 67, 888, 777]\nconsole.log(arr.radixSort())\n")])]),n._v(" "),a("div",{staticClass:"line-numbers-wrapper"},[a("span",{staticClass:"line-number"},[n._v("1")]),a("br"),a("span",{staticClass:"line-number"},[n._v("2")]),a("br"),a("span",{staticClass:"line-number"},[n._v("3")]),a("br"),a("span",{staticClass:"line-number"},[n._v("4")]),a("br"),a("span",{staticClass:"line-number"},[n._v("5")]),a("br"),a("span",{staticClass:"line-number"},[n._v("6")]),a("br"),a("span",{staticClass:"line-number"},[n._v("7")]),a("br"),a("span",{staticClass:"line-number"},[n._v("8")]),a("br"),a("span",{staticClass:"line-number"},[n._v("9")]),a("br"),a("span",{staticClass:"line-number"},[n._v("10")]),a("br"),a("span",{staticClass:"line-number"},[n._v("11")]),a("br"),a("span",{staticClass:"line-number"},[n._v("12")]),a("br"),a("span",{staticClass:"line-number"},[n._v("13")]),a("br"),a("span",{staticClass:"line-number"},[n._v("14")]),a("br"),a("span",{staticClass:"line-number"},[n._v("15")]),a("br"),a("span",{staticClass:"line-number"},[n._v("16")]),a("br"),a("span",{staticClass:"line-number"},[n._v("17")]),a("br"),a("span",{staticClass:"line-number"},[n._v("18")]),a("br"),a("span",{staticClass:"line-number"},[n._v("19")]),a("br"),a("span",{staticClass:"line-number"},[n._v("20")]),a("br"),a("span",{staticClass:"line-number"},[n._v("21")]),a("br"),a("span",{staticClass:"line-number"},[n._v("22")]),a("br"),a("span",{staticClass:"line-number"},[n._v("23")]),a("br"),a("span",{staticClass:"line-number"},[n._v("24")]),a("br"),a("span",{staticClass:"line-number"},[n._v("25")]),a("br")])]),a("ul",[a("li",[n._v("实现思想")])]),n._v(" "),a("div",{staticClass:"language- line-numbers-mode"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[n._v("/ LSD Radix Sort\n// 比较整型\nvar counter = [];\n\n// 定义一个函数 arr待排序数组 maxDigit数组中最大数的位数,例如[1,10,100]的maxDigit为3\nfunction radixSort(arr, maxDigit) {\n var mod = 10;\n var dev = 1;\n for (var i = 0; i < maxDigit; i++, dev *= 10, mod *= 10) {\n\n // 
把待排序的数组 arr 中的每一位整数,插入对应的容器\n for(var j = 0; j < arr.length; j++) {\n\n // 从个位开始,得到数组中每个数的每一位并保存在 bucket 变量中\n // bucket 变量的值可能为 0 1 2 3 4 5 6 7 8 9\n // 与之对应的 counter[bucket] 容器为 0 1 2 3 4 5 6 7 8 9\n var bucket = parseInt((arr[j] % mod) / dev);\n\n // 如果目前 bucket 变量的值对应的 counter[bucket] 容器还不存在(未初始化),则创建(初始化)一个新的空容器\n if(counter[bucket]==null) {\n counter[bucket] = [];\n }\n // 现在把这个 bucket 变量的值插入对应的 counter[bucket] 容器的尾部\n counter[bucket].push(arr[j]);\n }\n\n // 把 counter[bucket] 容器里的数依次取出 \n var pos = 0;\n for(var j = 0; j < counter.length; j++) {\n // 定义一个变量 value 用于保存conter[j].shift\n var value = null;\n if(counter[j]!=null) {\n while ((value = counter[j].shift()) != null) {\n arr[pos++] = value;\n }\n }\n }\n }\n return arr;\n}\n\nconsole.log(radixSort([99,15,48,75,46,37,90,100],3));\n\n\n\n// 第一步\n\n0 90 100\n1 \n2\n3\n4\n5 15 75\n6 46\n7 37\n8 48\n9 99\n\n=> 90 100 15 75 46 37 48 99\n\n--------------------------------------\n1.取末尾 按照对应的末尾数字排序 |\n100 75 |\n90 15 46 37 48 99 |\n0 1 2 3 4 5 6 7 8 9 |\n--------------------------------------\n\n// 第二步\n\n0 100\n1 15\n2\n3 37\n4 46 48\n5\n6\n7 75\n8\n9 90 99\n\n=> 100 15 37 46 48 75 90 99\n\n--------------------------------------------------------------------\n2.原排序进一位,原来开始最低位是个位,进一位就是十位, 按照对应的十位数字排序 |\n 48 99 |\n100 15 37 46 75 90 |\n0 1 2 3 4 5 6 7 8 9 |\n-------------------------------------------------------------------\n\n\n// 第三步\n\n0 15 37 46 48 75 90 99\n1 100 \n2\n3\n4\n5\n6\n7\n8\n9\n\n=> 15 37 46 48 75 90 99 100\n\n\n--------------------------------------------------------------------\n3.排序再进一位,上一次是十位,进一位就是百位,原来的两位数字补0,如 15--\x3e 015 按照对应的百位数字排序 |\n99\n90\n75\n48\n46\n37 |\n15 100 |\n0 1 2 3 4 5 6 7 8 9 |\n-------------------------------------------------------------------\n\n\n")])]),n._v(" 
"),a("div",{staticClass:"line-numbers-wrapper"},[a("span",{staticClass:"line-number"},[n._v("1")]),a("br"),a("span",{staticClass:"line-number"},[n._v("2")]),a("br"),a("span",{staticClass:"line-number"},[n._v("3")]),a("br"),a("span",{staticClass:"line-number"},[n._v("4")]),a("br"),a("span",{staticClass:"line-number"},[n._v("5")]),a("br"),a("span",{staticClass:"line-number"},[n._v("6")]),a("br"),a("span",{staticClass:"line-number"},[n._v("7")]),a("br"),a("span",{staticClass:"line-number"},[n._v("8")]),a("br"),a("span",{staticClass:"line-number"},[n._v("9")]),a("br"),a("span",{staticClass:"line-number"},[n._v("10")]),a("br"),a("span",{staticClass:"line-number"},[n._v("11")]),a("br"),a("span",{staticClass:"line-number"},[n._v("12")]),a("br"),a("span",{staticClass:"line-number"},[n._v("13")]),a("br"),a("span",{staticClass:"line-number"},[n._v("14")]),a("br"),a("span",{staticClass:"line-number"},[n._v("15")]),a("br"),a("span",{staticClass:"line-number"},[n._v("16")]),a("br"),a("span",{staticClass:"line-number"},[n._v("17")]),a("br"),a("span",{staticClass:"line-number"},[n._v("18")]),a("br"),a("span",{staticClass:"line-number"},[n._v("19")]),a("br"),a("span",{staticClass:"line-number"},[n._v("20")]),a("br"),a("span",{staticClass:"line-number"},[n._v("21")]),a("br"),a("span",{staticClass:"line-number"},[n._v("22")]),a("br"),a("span",{staticClass:"line-number"},[n._v("23")]),a("br"),a("span",{staticClass:"line-number"},[n._v("24")]),a("br"),a("span",{staticClass:"line-number"},[n._v("25")]),a("br"),a("span",{staticClass:"line-number"},[n._v("26")]),a("br"),a("span",{staticClass:"line-number"},[n._v("27")]),a("br"),a("span",{staticClass:"line-number"},[n._v("28")]),a("br"),a("span",{staticClass:"line-number"},[n._v("29")]),a("br"),a("span",{staticClass:"line-number"},[n._v("30")]),a("br"),a("span",{staticClass:"line-number"},[n._v("31")]),a("br"),a("span",{staticClass:"line-number"},[n._v("32")]),a("br"),a("span",{staticClass:"line-number"},[n._v("33")]),a("br"),a("span",{staticClass:"line-number"},[n._v("34")]),a("br"),a("span",{staticClass:"line-number"},[n._v("35")]),a("br"),a("span",{staticClass:"line-number"},[n._v("36")]),a("br"),a("span",{staticClass:"line-number"},[n._v("37")]),a("br"),a("span",{staticClass:"line-number"},[n._v("38")]),a("br"),a("span",{staticClass:"line-number"},[n._v("39")]),a("br"),a("span",{staticClass:"line-number"},[n._v("40")]),a("br"),a("span",{staticClass:"line-number"},[n._v("41")]),a("br"),a("span",{staticClass:"line-number"},[n._v("42")]),a("br"),a("span",{staticClass:"line-number"},[n._v("43")]),a("br"),a("span",{staticClass:"line-number"},[n._v("44")]),a("br"),a("span",{staticClass:"line-number"},[n._v("45")]),a("br"),a("span",{staticClass:"line-number"},[n._v("46")]),a("br"),a("span",{staticClass:"line-number"},[n._v("47")]),a("br"),a("span",{staticClass:"line-number"},[n._v("48")]),a("br"),a("span",{staticClass:"line-number"},[n._v("49")]),a("br"),a("span",{staticClass:"line-number"},[n._v("50")]),a("br"),a("span",{staticClass:"line-number"},[n._v("51")]),a("br"),a("span",{staticClass:"line-number"},[n._v("52")]),a("br"),a("span",{staticClass:"line-number"},[n._v("53")]),a("br"),a("span",{staticClass:"line-number"},[n._v("54")]),a("br"),a("span",{staticClass:"line-number"},[n._v("55")]),a("br"),a("span",{staticClass:"line-number"},[n._v("56")]),a("br"),a("span",{staticClass:"line-number"},[n._v("57")]),a("br"),a("span",{staticClass:"line-number"},[n._v("58")]),a("br"),a("span",{staticClass:"line-number"},[n._v("59")]),a("br"),a("span",{staticClass:"line-number
"},[n._v("60")]),a("br"),a("span",{staticClass:"line-number"},[n._v("61")]),a("br"),a("span",{staticClass:"line-number"},[n._v("62")]),a("br"),a("span",{staticClass:"line-number"},[n._v("63")]),a("br"),a("span",{staticClass:"line-number"},[n._v("64")]),a("br"),a("span",{staticClass:"line-number"},[n._v("65")]),a("br"),a("span",{staticClass:"line-number"},[n._v("66")]),a("br"),a("span",{staticClass:"line-number"},[n._v("67")]),a("br"),a("span",{staticClass:"line-number"},[n._v("68")]),a("br"),a("span",{staticClass:"line-number"},[n._v("69")]),a("br"),a("span",{staticClass:"line-number"},[n._v("70")]),a("br"),a("span",{staticClass:"line-number"},[n._v("71")]),a("br"),a("span",{staticClass:"line-number"},[n._v("72")]),a("br"),a("span",{staticClass:"line-number"},[n._v("73")]),a("br"),a("span",{staticClass:"line-number"},[n._v("74")]),a("br"),a("span",{staticClass:"line-number"},[n._v("75")]),a("br"),a("span",{staticClass:"line-number"},[n._v("76")]),a("br"),a("span",{staticClass:"line-number"},[n._v("77")]),a("br"),a("span",{staticClass:"line-number"},[n._v("78")]),a("br"),a("span",{staticClass:"line-number"},[n._v("79")]),a("br"),a("span",{staticClass:"line-number"},[n._v("80")]),a("br"),a("span",{staticClass:"line-number"},[n._v("81")]),a("br"),a("span",{staticClass:"line-number"},[n._v("82")]),a("br"),a("span",{staticClass:"line-number"},[n._v("83")]),a("br"),a("span",{staticClass:"line-number"},[n._v("84")]),a("br"),a("span",{staticClass:"line-number"},[n._v("85")]),a("br"),a("span",{staticClass:"line-number"},[n._v("86")]),a("br"),a("span",{staticClass:"line-number"},[n._v("87")]),a("br"),a("span",{staticClass:"line-number"},[n._v("88")]),a("br"),a("span",{staticClass:"line-number"},[n._v("89")]),a("br"),a("span",{staticClass:"line-number"},[n._v("90")]),a("br"),a("span",{staticClass:"line-number"},[n._v("91")]),a("br"),a("span",{staticClass:"line-number"},[n._v("92")]),a("br"),a("span",{staticClass:"line-number"},[n._v("93")]),a("br"),a("span",{staticClass:"line-number"},[n._v("94")]),a("br"),a("span",{staticClass:"line-number"},[n._v("95")]),a("br"),a("span",{staticClass:"line-number"},[n._v("96")]),a("br"),a("span",{staticClass:"line-number"},[n._v("97")]),a("br"),a("span",{staticClass:"line-number"},[n._v("98")]),a("br"),a("span",{staticClass:"line-number"},[n._v("99")]),a("br"),a("span",{staticClass:"line-number"},[n._v("100")]),a("br"),a("span",{staticClass:"line-number"},[n._v("101")]),a("br"),a("span",{staticClass:"line-number"},[n._v("102")]),a("br"),a("span",{staticClass:"line-number"},[n._v("103")]),a("br"),a("span",{staticClass:"line-number"},[n._v("104")]),a("br"),a("span",{staticClass:"line-number"},[n._v("105")]),a("br"),a("span",{staticClass:"line-number"},[n._v("106")]),a("br"),a("span",{staticClass:"line-number"},[n._v("107")]),a("br"),a("span",{staticClass:"line-number"},[n._v("108")]),a("br"),a("span",{staticClass:"line-number"},[n._v("109")]),a("br"),a("span",{staticClass:"line-number"},[n._v("110")]),a("br"),a("span",{staticClass:"line-number"},[n._v("111")]),a("br"),a("span",{staticClass:"line-number"},[n._v("112")]),a("br"),a("span",{staticClass:"line-number"},[n._v("113")]),a("br"),a("span",{staticClass:"line-number"},[n._v("114")]),a("br"),a("span",{staticClass:"line-number"},[n._v("115")]),a("br"),a("span",{staticClass:"line-number"},[n._v("116")]),a("br"),a("span",{staticClass:"line-number"},[n._v("117")]),a("br"),a("span",{staticClass:"line-number"},[n._v("118")]),a("br"),a("span",{staticClass:"line-number"},[n._v("119")]),a("br")])]),a("h2",{attrs:{id:"参考"}},
[a("a",{staticClass:"header-anchor",attrs:{href:"#参考","aria-hidden":"true"}},[n._v("#")]),n._v(" 参考")]),n._v(" "),a("ul",[a("li",[a("a",{attrs:{href:"https://zh.wikipedia.org/wiki/%E5%9F%BA%E6%95%B0%E6%8E%92%E5%BA%8F",target:"_blank",rel:"noopener noreferrer"}},[n._v("wiki---基数排序"),a("OutboundLink")],1)]),n._v(" "),a("li",[a("a",{attrs:{href:"https://blog.csdn.net/swpu_Leo/article/details/72251301",target:"_blank",rel:"noopener noreferrer"}},[n._v("排序算法之基数排序 - JavaScript实现"),a("OutboundLink")],1)])])])},[],!1,null,null,null);s.default=t.exports}}]);
tenant_resolver.ts
/*
 *   Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License").
 *   You may not use this file except in compliance with the License.
 *   A copy of the License is located at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *   or in the "license" file accompanying this file. This file is distributed
 *   on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 *   express or implied. See the License for the specific language governing
 *   permissions and limitations under the License.
 */

import { isEmpty, findKey, cloneDeep } from 'lodash';
import { OpenSearchDashboardsRequest } from '../../../../src/core/server';
import { SecuritySessionCookie } from '../session/security_cookie';
import { SecurityPluginConfigType } from '..';

const PRIVATE_TENANT_SYMBOL: string = '__user__';
const GLOBAL_TENANT_SYMBOL: string = '';

export const PRIVATE_TENANTS: string[] = [PRIVATE_TENANT_SYMBOL, 'private'];
export const GLOBAL_TENANTS: string[] = ['global', GLOBAL_TENANT_SYMBOL];

/**
 * Resolves the tenant the user is using.
 *
 * @param request OpenSearchDashboards request.
 * @param username name of the authenticated user.
 * @param availabeTenants tenants available to the user, i.e. the authinfo API response.
 * @param config security plugin config.
 * @param cookie cookie extracted from the request. The cookie should have been parsed by AuthenticationHandler.
 *               pass it as parameter instead of extracting again.
 *
 * @returns user preferred tenant of the request.
 */
export function resolveTenant(
  request: OpenSearchDashboardsRequest,
  username: string,
  availabeTenants: any,
  config: SecurityPluginConfigType,
  cookie: SecuritySessionCookie
): string | undefined {
  let selectedTenant: string | undefined;
  const query: any = request.url.query as any;
  if (query && (query.security_tenant || query.securitytenant)) {
    selectedTenant = query.security_tenant ? query.security_tenant : query.securitytenant;
  } else if (request.headers.securitytenant || request.headers.security_tenant) {
    selectedTenant = request.headers.securitytenant
      ? (request.headers.securitytenant as string)
      : (request.headers.security_tenant as string);
  } else if (isValidTenant(cookie.tenant)) {
    selectedTenant = cookie.tenant;
  } else {
    selectedTenant = undefined;
  }
  const preferredTenants = config.multitenancy?.tenants.preferred;
  const globalTenantEnabled = config.multitenancy?.tenants.enable_global || false;
  const privateTenantEnabled = config.multitenancy?.tenants.enable_private || false;

  return resolve(
    username,
    selectedTenant,
    preferredTenants,
    availabeTenants,
    globalTenantEnabled,
    privateTenantEnabled
  );
}

/**
 * Determines whether the request requires tenant info.
 * @param request opensearch-dashboards request.
 *
 * @returns true if the request requires tenant info, otherwise false.
 */
export function isMultitenantPath(request: OpenSearchDashboardsRequest): boolean {
  return (
    request.url.pathname?.startsWith('/opensearch') ||
    request.url.pathname?.startsWith('/api') ||
    request.url.pathname?.startsWith('/app') ||
    // short url path
    request.url.pathname?.startsWith('/goto') ||
    // bootstrap.js depends on tenant info to fetch opensearch-dashboards configs in tenant index
    (request.url.pathname?.indexOf('bootstrap.js') || -1) > -1 ||
    request.url.pathname === '/'
  );
}

function
(
  username: string,
  requestedTenant: string | undefined,
  preferredTenants: string[] | undefined,
  availableTenants: any, // is an object like { tenant_name_1: true, tenant_name_2: false, ... }
  globalTenantEnabled: boolean,
  privateTenantEnabled: boolean
): string | undefined {
  const availableTenantsClone = cloneDeep(availableTenants);
  delete availableTenantsClone[username];

  if (!globalTenantEnabled && !privateTenantEnabled && isEmpty(availableTenantsClone)) {
    return undefined;
  }

  if (isValidTenant(requestedTenant)) {
    requestedTenant = requestedTenant!;
    if (requestedTenant in availableTenants) {
      return requestedTenant;
    }

    if (
      privateTenantEnabled &&
      username in availableTenants &&
      PRIVATE_TENANTS.indexOf(requestedTenant) > -1
    ) {
      return PRIVATE_TENANT_SYMBOL;
    }

    if (globalTenantEnabled && GLOBAL_TENANTS.indexOf(requestedTenant) > -1) {
      return GLOBAL_TENANT_SYMBOL;
    }
  }

  if (preferredTenants && !isEmpty(preferredTenants)) {
    for (const element of preferredTenants) {
      const tenant = element.toLowerCase();

      if (globalTenantEnabled && GLOBAL_TENANTS.indexOf(tenant) > -1) {
        return GLOBAL_TENANT_SYMBOL;
      }

      if (
        privateTenantEnabled &&
        PRIVATE_TENANTS.indexOf(tenant) > -1 &&
        username in availableTenants
      ) {
        return PRIVATE_TENANT_SYMBOL;
      }

      if (tenant in availableTenants) {
        return tenant;
      }
    }
  }

  if (globalTenantEnabled) {
    return GLOBAL_TENANT_SYMBOL;
  }

  if (privateTenantEnabled) {
    return PRIVATE_TENANT_SYMBOL;
  }

  // fall back to the first tenant in the available tenants
  return findKey(availableTenantsClone, () => true);
}

/**
 * Return true if tenant parameter is a valid tenant.
 *
 * Note: empty string '' is valid, which means global tenant.
 *
 * @param tenant
 */
export function isValidTenant(tenant: string | undefined | null): boolean {
  return tenant !== undefined && tenant !== null;
}
resolve
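// A small illustrative check (not part of the plugin) of the contract that
// isValidTenant above encodes: the empty string is a real tenant (global),
// and only undefined/null mean "no tenant":

function demoIsValidTenant() {
  console.assert(isValidTenant('') === true); // '' names the global tenant
  console.assert(isValidTenant('some_tenant') === true);
  console.assert(isValidTenant(undefined) === false);
  console.assert(isValidTenant(null) === false);
}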
channel-closed-table.component.spec.ts
import { waitForAsync, ComponentFixture, TestBed } from '@angular/core/testing'; import { StoreModule } from '@ngrx/store'; import { RTLReducer } from '../../../../../store/rtl.reducers'; import { CommonService } from '../../../../../shared/services/common.service'; import { LoggerService } from '../../../../../shared/services/logger.service'; import { ChannelClosedTableComponent } from './channel-closed-table.component'; import { mockDataService, mockLoggerService } from '../../../../../shared/test-helpers/mock-services'; import { SharedModule } from '../../../../../shared/shared.module'; import { BrowserAnimationsModule } from '@angular/platform-browser/animations'; import { DataService } from '../../../../../shared/services/data.service'; describe('ChannelClosedTableComponent', () => { let component: ChannelClosedTableComponent; let fixture: ComponentFixture<ChannelClosedTableComponent>; beforeEach(waitForAsync(() => { TestBed.configureTestingModule({ declarations: [ChannelClosedTableComponent], imports: [ BrowserAnimationsModule, SharedModule, StoreModule.forRoot(RTLReducer, { runtimeChecks: { strictStateImmutability: false, strictActionImmutability: false } }) ], providers: [ CommonService, { provide: LoggerService, useClass: mockLoggerService }, { provide: DataService, useClass: mockDataService } ]
beforeEach(() => { fixture = TestBed.createComponent(ChannelClosedTableComponent); component = fixture.componentInstance; fixture.detectChanges(); }); it('should create', () => { expect(component).toBeTruthy(); }); afterEach(() => { TestBed.resetTestingModule(); }); });
}). compileComponents(); }));
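// An illustrative extra spec (a sketch, not part of the original suite):
// because DataService is replaced by mockDataService above, a test can pull
// the mock back out of the fixture's injector and assert against it:
//
//   it('should resolve the mocked DataService from the injector', () => {
//     const dataService = fixture.debugElement.injector.get(DataService);
//     expect(dataService).toBeTruthy();
//   });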
Arrays.rs
use std::rc::Rc; use crate::{Array, QSharpIntrinsics}; #[inline] pub fn EmptyArray<Sim: QSharpIntrinsics, T>(_sim: &mut Sim) -> Array<T> { Rc::new(Vec::new()) } #[inline] pub fn Head<Sim: QSharpIntrinsics, T: Clone>(_sim: &mut Sim, array: Array<T>) -> T { array[0].clone() } #[inline] pub fn Head_adj<Sim: QSharpIntrinsics, T: Clone>(_sim: &mut Sim, array: Array<T>) -> T
#[inline] pub fn Tail<Sim: QSharpIntrinsics, T: Clone>(_sim: &mut Sim, array: Array<T>) -> T { array[array.len() - 1].clone() } #[inline] pub fn Tail_adj<Sim: QSharpIntrinsics, T: Clone>(_sim: &mut Sim, array: Array<T>) -> T { array[array.len() - 1].clone() } #[inline] pub fn Rest<Sim: QSharpIntrinsics, T: Clone>(_sim: &mut Sim, array: Array<T>) -> Array<T> { Rc::new(array[1..].to_vec()) } #[inline] pub fn Rest_adj<Sim: QSharpIntrinsics, T: Clone>(_sim: &mut Sim, array: Array<T>) -> Array<T> { Rc::new(array[1..].to_vec()) }
{ array[0].clone() }
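// A hedged usage sketch (not part of the runtime): these helpers only read
// the array, and the simulator argument is ignored. Assuming some `sim`
// value implementing QSharpIntrinsics (hypothetical here):
//
//     let arr: Array<i64> = Rc::new(vec![1, 2, 3]);
//     assert_eq!(Head(&mut sim, arr.clone()), 1);
//     assert_eq!(Tail(&mut sim, arr.clone()), 3);
//     assert_eq!(*Rest(&mut sim, arr), vec![2, 3]);
//
// Note that Head/Tail index without a bounds check, so an empty array panics.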
entry.rs
//! The `entry` module is a fundamental building block of Proof of History. It contains a //! unique ID that is the hash of the Entry before it, plus the hash of the //! transactions within it. Entries cannot be reordered, and its field `num_hashes` //! represents an approximate amount of time since the last Entry was created. use crate::poh::Poh; use dlopen::symbor::{Container, SymBorApi, Symbol}; use dlopen_derive::SymBorApi; use log::*; use rand::{thread_rng, Rng}; use rayon::prelude::*; use rayon::ThreadPool; use serde::{Deserialize, Serialize}; use solana_measure::measure::Measure; use solana_merkle_tree::MerkleTree; use solana_metrics::*; use solana_perf::{ cuda_runtime::PinnedVec, packet::{Packet, Packets, PacketsRecycler, PACKETS_PER_BATCH}, perf_libs, recycler::Recycler, sigverify, }; use solana_rayon_threadlimit::get_thread_count; use solana_sdk::hash::Hash; use solana_sdk::packet::Meta; use solana_sdk::timing; use solana_sdk::transaction::{ Result, SanitizedTransaction, Transaction, TransactionError, TransactionVerificationMode, VersionedTransaction, }; use std::cell::RefCell; use std::ffi::OsStr; use std::sync::mpsc::{Receiver, Sender}; use std::sync::Once; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; use std::time::Instant; use std::{cmp, thread}; thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) .thread_name(|ix| format!("entry_{}", ix)) .build() .unwrap())); pub type EntrySender = Sender<Vec<Entry>>; pub type EntryReceiver = Receiver<Vec<Entry>>; static mut API: Option<Container<Api>> = None; pub fn init_poh() { init(OsStr::new("libpoh-simd.so")); } fn init(name: &OsStr) { static INIT_HOOK: Once = Once::new(); info!("Loading {:?}", name); unsafe { INIT_HOOK.call_once(|| { let path; let lib_name = if let Some(perf_libs_path) = solana_perf::perf_libs::locate_perf_libs() { solana_perf::perf_libs::append_to_ld_library_path( perf_libs_path.to_str().unwrap_or("").to_string(), ); path = perf_libs_path.join(name); path.as_os_str() } else { name }; API = Container::load(lib_name).ok(); }) } } pub fn api() -> Option<&'static Container<Api<'static>>> { { static INIT_HOOK: Once = Once::new(); INIT_HOOK.call_once(|| { if std::env::var("TEST_PERF_LIBS").is_ok() { init_poh() } }) } unsafe { API.as_ref() } } #[derive(SymBorApi)] pub struct Api<'a> { pub poh_verify_many_simd_avx512skx: Symbol<'a, unsafe extern "C" fn(hashes: *mut u8, num_hashes: *const u64)>, pub poh_verify_many_simd_avx2: Symbol<'a, unsafe extern "C" fn(hashes: *mut u8, num_hashes: *const u64)>, } /// Each Entry contains three pieces of data. The `num_hashes` field is the number /// of hashes performed since the previous entry. The `hash` field is the result /// of hashing `hash` from the previous entry `num_hashes` times. The `transactions` /// field points to Transactions that took place shortly before `hash` was generated. /// /// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you /// get a duration estimate since the last Entry. Since processing power increases /// over time, one should expect the duration `num_hashes` represents to decrease proportionally. /// An upper bound on Duration can be estimated by assuming each hash was generated by the /// world's fastest processor at the time the entry was recorded. 
Or said another way, it /// is physically not possible for a shorter duration to have occurred if one assumes the /// hash was computed by the world's fastest processor at that time. The hash chain is both /// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of /// Work consensus!) #[derive(Serialize, Deserialize, Debug, Default, PartialEq, Eq, Clone)] pub struct Entry { /// The number of hashes since the previous Entry ID. pub num_hashes: u64, /// The SHA-256 hash `num_hashes` after the previous Entry ID. pub hash: Hash, /// An unordered list of transactions that were observed before the Entry ID was /// generated. They may have been observed before a previous Entry ID but were /// pushed back into this list to ensure deterministic interpretation of the ledger. pub transactions: Vec<VersionedTransaction>, } /// Typed entry to distinguish between transaction and tick entries pub enum EntryType { Transactions(Vec<SanitizedTransaction>), Tick(Hash), } impl Entry { /// Creates the next Entry `num_hashes` after `start_hash`. pub fn new(prev_hash: &Hash, mut num_hashes: u64, transactions: Vec<Transaction>) -> Self { // If you passed in transactions, but passed in num_hashes == 0, then // next_hash will generate the next hash and set num_hashes == 1 if num_hashes == 0 && !transactions.is_empty() { num_hashes = 1; } let transactions = transactions.into_iter().map(Into::into).collect::<Vec<_>>(); let hash = next_hash(prev_hash, num_hashes, &transactions); Entry { num_hashes, hash, transactions, } } pub fn new_mut( start_hash: &mut Hash, num_hashes: &mut u64, transactions: Vec<Transaction>, ) -> Self { let entry = Self::new(start_hash, *num_hashes, transactions); *start_hash = entry.hash; *num_hashes = 0; entry } #[cfg(test)] pub fn new_tick(num_hashes: u64, hash: &Hash) -> Self { Entry { num_hashes, hash: *hash, transactions: vec![], } } /// Verifies self.hash is the result of hashing a `start_hash` `self.num_hashes` times. /// If the transaction is not a Tick, then hash that as well. pub fn verify(&self, start_hash: &Hash) -> bool { let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions); if self.hash != ref_hash { warn!( "next_hash is invalid expected: {:?} actual: {:?}", self.hash, ref_hash ); return false; } true } pub fn is_tick(&self) -> bool { self.transactions.is_empty() } } pub fn hash_transactions(transactions: &[VersionedTransaction]) -> Hash { // a hash of a slice of transactions only needs to hash the signatures let signatures: Vec<_> = transactions .iter() .flat_map(|tx| tx.signatures.iter()) .collect(); let merkle_tree = MerkleTree::new(&signatures); if let Some(root_hash) = merkle_tree.get_root() { *root_hash } else { Hash::default() } } /// Creates the hash `num_hashes` after `start_hash`. If the transaction contains /// a signature, the final hash will be a hash of both the previous ID and /// the signature. If num_hashes is zero and there's no transaction data, /// start_hash is returned. 
pub fn next_hash( start_hash: &Hash, num_hashes: u64, transactions: &[VersionedTransaction], ) -> Hash { if num_hashes == 0 && transactions.is_empty() { return *start_hash; } let mut poh = Poh::new(*start_hash, None); poh.hash(num_hashes.saturating_sub(1)); if transactions.is_empty() { poh.tick().unwrap().hash } else { poh.record(hash_transactions(transactions)).unwrap().hash } } /// Last action required to verify an entry enum VerifyAction { /// Mixin a hash before computing the last hash for a transaction entry Mixin(Hash), /// Compute one last hash for a tick entry Tick, /// No action needed (tick entry with no hashes) None, } pub struct GpuVerificationData { thread_h: Option<JoinHandle<u64>>, hashes: Option<Arc<Mutex<PinnedVec<Hash>>>>, verifications: Option<Vec<(VerifyAction, Hash)>>, } pub enum DeviceVerificationData { Cpu(), Gpu(GpuVerificationData), } pub struct EntryVerificationState { verification_status: EntryVerificationStatus, poh_duration_us: u64, device_verification_data: DeviceVerificationData, } pub struct GpuSigVerificationData { thread_h: Option<JoinHandle<(bool, u64)>>, } pub enum DeviceSigVerificationData { Cpu(), Gpu(GpuSigVerificationData), } pub struct EntrySigVerificationState { verification_status: EntryVerificationStatus, entries: Option<Vec<EntryType>>, device_verification_data: DeviceSigVerificationData, gpu_verify_duration_us: u64, } impl<'a> EntrySigVerificationState { pub fn entries(&mut self) -> Option<Vec<EntryType>> { self.entries.take() } pub fn finish_verify(&mut self) -> bool { match &mut self.device_verification_data { DeviceSigVerificationData::Gpu(verification_state) => { let (verified, gpu_time_us) = verification_state.thread_h.take().unwrap().join().unwrap(); self.gpu_verify_duration_us = gpu_time_us; self.verification_status = if verified { EntryVerificationStatus::Success } else { EntryVerificationStatus::Failure }; verified } DeviceSigVerificationData::Cpu() => { self.verification_status == EntryVerificationStatus::Success } } } pub fn status(&self) -> EntryVerificationStatus { self.verification_status } pub fn gpu_verify_duration(&self) -> u64 { self.gpu_verify_duration_us } } #[derive(Default, Clone)] pub struct VerifyRecyclers { hash_recycler: Recycler<PinnedVec<Hash>>, tick_count_recycler: Recycler<PinnedVec<u64>>, packet_recycler: PacketsRecycler, out_recycler: Recycler<PinnedVec<u8>>, tx_offset_recycler: Recycler<sigverify::TxOffset>, } #[derive(PartialEq, Clone, Copy, Debug)] pub enum EntryVerificationStatus { Failure, Success, Pending, } impl EntryVerificationState { pub fn status(&self) -> EntryVerificationStatus { self.verification_status } pub fn poh_duration_us(&self) -> u64 { self.poh_duration_us } pub fn finish_verify(&mut self) -> bool { match &mut self.device_verification_data { DeviceVerificationData::Gpu(verification_state) => { let gpu_time_us = verification_state.thread_h.take().unwrap().join().unwrap(); let mut verify_check_time = Measure::start("verify_check"); let hashes = verification_state.hashes.take().unwrap(); let hashes = Arc::try_unwrap(hashes) .expect("unwrap Arc") .into_inner() .expect("into_inner"); let res = PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { hashes .into_par_iter() .cloned() .zip(verification_state.verifications.take().unwrap()) .all(|(hash, (action, expected))| { let actual = match action { VerifyAction::Mixin(mixin) => { Poh::new(hash, None).record(mixin).unwrap().hash } VerifyAction::Tick => Poh::new(hash, None).tick().unwrap().hash, VerifyAction::None => hash, }; actual 
== expected }) }) }); verify_check_time.stop(); self.poh_duration_us += gpu_time_us + verify_check_time.as_us(); self.verification_status = if res { EntryVerificationStatus::Success } else { EntryVerificationStatus::Failure }; res } DeviceVerificationData::Cpu() => { self.verification_status == EntryVerificationStatus::Success } } } } pub fn verify_transactions( entries: Vec<Entry>, verify: Arc<dyn Fn(VersionedTransaction) -> Result<SanitizedTransaction> + Send + Sync>, ) -> Result<Vec<EntryType>> { PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { entries .into_par_iter() .map(|entry| { if entry.transactions.is_empty() { Ok(EntryType::Tick(entry.hash)) } else { Ok(EntryType::Transactions( entry .transactions .into_par_iter() .map(verify.as_ref()) .collect::<Result<Vec<_>>>()?, )) } }) .collect() }) }) } pub fn start_verify_transactions( entries: Vec<Entry>, skip_verification: bool, verify_recyclers: VerifyRecyclers, verify: Arc< dyn Fn(VersionedTransaction, TransactionVerificationMode) -> Result<SanitizedTransaction> + Send + Sync, >, ) -> Result<EntrySigVerificationState> { let api = perf_libs::api(); // Use the CPU if we have too few transactions for GPU signature verification to be worth it. // We will also use the CPU if no acceleration API is used or if we're skipping // the signature verification as we'd have nothing to do on the GPU in that case. // TODO: make the CPU-to GPU crossover point dynamic, perhaps based on similar future // heuristics to what might be used in sigverify::ed25519_verify when a dynamic crossover // is introduced for that function (see TODO in sigverify::ed25519_verify) let use_cpu = skip_verification || api.is_none() || entries .iter() .try_fold(0, |accum: usize, entry: &Entry| -> Option<usize> { if accum.saturating_add(entry.transactions.len()) < 512 { Some(accum.saturating_add(entry.transactions.len())) } else { None } }) .is_some(); if use_cpu { let verify_func = { let verification_mode = if skip_verification { TransactionVerificationMode::HashOnly } else { TransactionVerificationMode::FullVerification }; move |versioned_tx: VersionedTransaction| -> Result<SanitizedTransaction> { verify(versioned_tx, verification_mode) } }; let entries = verify_transactions(entries, Arc::new(verify_func)); match entries { Ok(entries_val) => { return Ok(EntrySigVerificationState { verification_status: EntryVerificationStatus::Success, entries: Some(entries_val), device_verification_data: DeviceSigVerificationData::Cpu(), gpu_verify_duration_us: 0, }); } Err(err) => { return Err(err); } } } let verify_func = { move |versioned_tx: VersionedTransaction| -> Result<SanitizedTransaction> { verify( versioned_tx, TransactionVerificationMode::HashAndVerifyPrecompiles, ) } }; let entries = verify_transactions(entries, Arc::new(verify_func)); match entries { Ok(entries) => { let num_transactions: usize = entries .iter() .map(|entry: &EntryType| -> usize { match entry { EntryType::Transactions(transactions) => transactions.len(), EntryType::Tick(_) => 0, } }) .sum(); if num_transactions == 0 { return Ok(EntrySigVerificationState { verification_status: EntryVerificationStatus::Success, entries: Some(entries), device_verification_data: DeviceSigVerificationData::Cpu(), gpu_verify_duration_us: 0, }); } let entry_txs: Vec<&SanitizedTransaction> = entries .iter() .filter_map(|entry_type| match entry_type { EntryType::Tick(_) => None, EntryType::Transactions(transactions) => Some(transactions), }) .flatten() .collect::<Vec<_>>(); let mut packets_vec = entry_txs 
                .par_iter()
                .chunks(PACKETS_PER_BATCH)
                .map(|slice| {
                    let vec_size = slice.len();
                    let mut packets = Packets::new_with_recycler(
                        verify_recyclers.packet_recycler.clone(),
                        vec_size,
                        "entry-sig-verify",
                    );
                    // We use set_len here instead of resize(num_transactions, Packet::default()), to save
                    // memory bandwidth and avoid writing a large amount of data that will be overwritten
                    // soon afterwards. Also, Packet::default() leaves the packet data uninitialized
                    // anyway, so the initialization would simply write junk into the vector.
                    unsafe {
                        packets.packets.set_len(vec_size);
                    }
                    let entry_tx_iter = slice
                        .into_par_iter()
                        .map(|tx| tx.to_versioned_transaction());

                    let res = packets
                        .packets
                        .par_iter_mut()
                        .zip(entry_tx_iter)
                        .all(|pair| {
                            pair.0.meta = Meta::default();
                            Packet::populate_packet(pair.0, None, &pair.1).is_ok()
                        });
                    if res {
                        Ok(packets)
                    } else {
                        Err(TransactionError::SanitizeFailure)
                    }
                })
                .collect::<Result<Vec<_>>>()?;

            let tx_offset_recycler = verify_recyclers.tx_offset_recycler;
            let out_recycler = verify_recyclers.out_recycler;
            let gpu_verify_thread = thread::spawn(move || {
                let mut verify_time = Measure::start("sigverify");
                sigverify::ed25519_verify(
                    &mut packets_vec,
                    &tx_offset_recycler,
                    &out_recycler,
                    false,
                );
                let verified = packets_vec
                    .iter()
                    .all(|packets| packets.packets.iter().all(|p| !p.meta.discard));
                verify_time.stop();
                (verified, verify_time.as_us())
            });

            Ok(EntrySigVerificationState {
                verification_status: EntryVerificationStatus::Pending,
                entries: Some(entries),
                device_verification_data: DeviceSigVerificationData::Gpu(GpuSigVerificationData {
                    thread_h: Some(gpu_verify_thread),
                }),
                gpu_verify_duration_us: 0,
            })
        }
        Err(err) => Err(err),
    }
}

fn compare_hashes(computed_hash: Hash, ref_entry: &Entry) -> bool {
    let actual = if !ref_entry.transactions.is_empty() {
        let tx_hash = hash_transactions(&ref_entry.transactions);
        let mut poh = Poh::new(computed_hash, None);
        poh.record(tx_hash).unwrap().hash
    } else if ref_entry.num_hashes > 0 {
        let mut poh = Poh::new(computed_hash, None);
        poh.tick().unwrap().hash
    } else {
        computed_hash
    };
    actual == ref_entry.hash
}

// An EntrySlice is a slice of Entries.
pub trait EntrySlice {
    /// Verifies the hashes and counts of a slice of transactions are all consistent.
    fn verify_cpu(&self, start_hash: &Hash) -> EntryVerificationState;
    fn verify_cpu_generic(&self, start_hash: &Hash) -> EntryVerificationState;
    fn verify_cpu_x86_simd(&self, start_hash: &Hash, simd_len: usize) -> EntryVerificationState;
    fn start_verify(&self, start_hash: &Hash, recyclers: VerifyRecyclers)
        -> EntryVerificationState;
    fn verify(&self, start_hash: &Hash) -> bool;
    /// Checks that each entry tick has the correct number of hashes. Entry slices do not
    /// necessarily end in a tick, so `tick_hash_count` is used to carry over the hash count
    /// for the next entry slice.
fn verify_tick_hash_count(&self, tick_hash_count: &mut u64, hashes_per_tick: u64) -> bool; /// Counts tick entries fn tick_count(&self) -> u64; } impl EntrySlice for [Entry] { fn verify(&self, start_hash: &Hash) -> bool { self.start_verify(start_hash, VerifyRecyclers::default()) .finish_verify() } fn verify_cpu_generic(&self, start_hash: &Hash) -> EntryVerificationState { let now = Instant::now(); let genesis = [Entry { num_hashes: 0, hash: *start_hash, transactions: vec![], }]; let entry_pairs = genesis.par_iter().chain(self).zip(self); let res = PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { entry_pairs.all(|(x0, x1)| { let r = x1.verify(&x0.hash); if !r { warn!( "entry invalid!: x0: {:?}, x1: {:?} num txs: {}", x0.hash, x1.hash, x1.transactions.len() ); } r }) }) }); let poh_duration_us = timing::duration_as_us(&now.elapsed()); EntryVerificationState { verification_status: if res { EntryVerificationStatus::Success } else { EntryVerificationStatus::Failure }, poh_duration_us, device_verification_data: DeviceVerificationData::Cpu(), } } fn verify_cpu_x86_simd(&self, start_hash: &Hash, simd_len: usize) -> EntryVerificationState { use solana_sdk::hash::HASH_BYTES; let now = Instant::now(); let genesis = [Entry { num_hashes: 0, hash: *start_hash, transactions: vec![], }]; let aligned_len = ((self.len() + simd_len - 1) / simd_len) * simd_len; let mut hashes_bytes = vec![0u8; HASH_BYTES * aligned_len]; genesis .iter() .chain(self) .enumerate() .for_each(|(i, entry)| { if i < self.len() { let start = i * HASH_BYTES; let end = start + HASH_BYTES; hashes_bytes[start..end].copy_from_slice(&entry.hash.to_bytes()); } }); let mut hashes_chunked: Vec<_> = hashes_bytes.chunks_mut(simd_len * HASH_BYTES).collect(); let mut num_hashes: Vec<u64> = self .iter() .map(|entry| entry.num_hashes.saturating_sub(1)) .collect(); num_hashes.resize(aligned_len, 0); let num_hashes: Vec<_> = num_hashes.chunks(simd_len).collect(); let res = PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { hashes_chunked .par_iter_mut() .zip(num_hashes) .enumerate() .all(|(i, (chunk, num_hashes))| { match simd_len { 8 => unsafe { (api().unwrap().poh_verify_many_simd_avx2)( chunk.as_mut_ptr(), num_hashes.as_ptr(), ); }, 16 => unsafe { (api().unwrap().poh_verify_many_simd_avx512skx)( chunk.as_mut_ptr(), num_hashes.as_ptr(), ); }, _ => { panic!("unsupported simd len: {}", simd_len); } } let entry_start = i * simd_len; // The last chunk may produce indexes larger than what we have in the reference entries // because it is aligned to simd_len. 
let entry_end = std::cmp::min(entry_start + simd_len, self.len()); self[entry_start..entry_end] .iter() .enumerate() .all(|(j, ref_entry)| { let start = j * HASH_BYTES; let end = start + HASH_BYTES; let hash = Hash::new(&chunk[start..end]); compare_hashes(hash, ref_entry) }) }) }) }); let poh_duration_us = timing::duration_as_us(&now.elapsed()); EntryVerificationState { verification_status: if res { EntryVerificationStatus::Success } else { EntryVerificationStatus::Failure }, poh_duration_us, device_verification_data: DeviceVerificationData::Cpu(), } } fn verify_cpu(&self, start_hash: &Hash) -> EntryVerificationState { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] let (has_avx2, has_avx512) = ( is_x86_feature_detected!("avx2"), is_x86_feature_detected!("avx512f"), ); #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] let (has_avx2, has_avx512) = (false, false); if api().is_some() { if has_avx512 && self.len() >= 128 { self.verify_cpu_x86_simd(start_hash, 16) } else if has_avx2 && self.len() >= 48 { self.verify_cpu_x86_simd(start_hash, 8) } else { self.verify_cpu_generic(start_hash) } } else { self.verify_cpu_generic(start_hash) } } fn start_verify( &self, start_hash: &Hash, recyclers: VerifyRecyclers, ) -> EntryVerificationState { let start = Instant::now(); let api = perf_libs::api(); if api.is_none() { return self.verify_cpu(start_hash); } let api = api.unwrap(); inc_new_counter_info!("entry_verify-num_entries", self.len() as usize); let genesis = [Entry { num_hashes: 0, hash: *start_hash, transactions: vec![], }]; let hashes: Vec<Hash> = genesis .iter() .chain(self) .map(|entry| entry.hash) .take(self.len()) .collect(); let mut hashes_pinned = recyclers.hash_recycler.allocate("poh_verify_hash"); hashes_pinned.set_pinnable(); hashes_pinned.resize(hashes.len(), Hash::default()); hashes_pinned.copy_from_slice(&hashes); let mut num_hashes_vec = recyclers .tick_count_recycler .allocate("poh_verify_num_hashes"); num_hashes_vec.reserve_and_pin(cmp::max(1, self.len())); for entry in self { num_hashes_vec.push(entry.num_hashes.saturating_sub(1)); } let length = self.len(); let hashes = Arc::new(Mutex::new(hashes_pinned)); let hashes_clone = hashes.clone(); let gpu_verify_thread = thread::spawn(move || { let mut hashes = hashes_clone.lock().unwrap(); let gpu_wait = Instant::now(); let res; unsafe { res = (api.poh_verify_many)( hashes.as_mut_ptr() as *mut u8, num_hashes_vec.as_ptr(), length, 1, ); } assert!(res == 0, "GPU PoH verify many failed"); inc_new_counter_info!( "entry_verify-gpu_thread", timing::duration_as_us(&gpu_wait.elapsed()) as usize ); timing::duration_as_us(&gpu_wait.elapsed()) }); let verifications = PAR_THREAD_POOL.with(|thread_pool| { thread_pool.borrow().install(|| { self.into_par_iter() .map(|entry| { let answer = entry.hash; let action = if entry.transactions.is_empty() { if entry.num_hashes == 0 { VerifyAction::None } else { VerifyAction::Tick } } else { VerifyAction::Mixin(hash_transactions(&entry.transactions)) }; (action, answer) }) .collect() }) }); let device_verification_data = DeviceVerificationData::Gpu(GpuVerificationData { thread_h: Some(gpu_verify_thread), verifications: Some(verifications), hashes: Some(hashes), }); EntryVerificationState { verification_status: EntryVerificationStatus::Pending, poh_duration_us: timing::duration_as_us(&start.elapsed()), device_verification_data, } } fn verify_tick_hash_count(&self, tick_hash_count: &mut u64, hashes_per_tick: u64) -> bool { // When hashes_per_tick is 0, hashing is disabled. 
if hashes_per_tick == 0 { return true; } for entry in self { *tick_hash_count = tick_hash_count.saturating_add(entry.num_hashes); if entry.is_tick() { if *tick_hash_count != hashes_per_tick { warn!( "invalid tick hash count!: entry: {:#?}, tick_hash_count: {}, hashes_per_tick: {}", entry, tick_hash_count, hashes_per_tick ); return false; } *tick_hash_count = 0; } } *tick_hash_count < hashes_per_tick } fn tick_count(&self) -> u64 { self.iter().filter(|e| e.is_tick()).count() as u64 } } pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry { let entry = Entry::new(start, num_hashes, transactions); *start = entry.hash; entry } #[allow(clippy::same_item_push)] pub fn create_ticks(num_ticks: u64, hashes_per_tick: u64, mut hash: Hash) -> Vec<Entry> { let mut ticks = Vec::with_capacity(num_ticks as usize); for _ in 0..num_ticks { let new_tick = next_entry_mut(&mut hash, hashes_per_tick, vec![]); ticks.push(new_tick); } ticks } #[allow(clippy::same_item_push)] pub fn create_random_ticks(num_ticks: u64, max_hashes_per_tick: u64, mut hash: Hash) -> Vec<Entry> { let mut ticks = Vec::with_capacity(num_ticks as usize); for _ in 0..num_ticks { let hashes_per_tick = thread_rng().gen_range(1, max_hashes_per_tick); let new_tick = next_entry_mut(&mut hash, hashes_per_tick, vec![]); ticks.push(new_tick); } ticks } /// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`. pub fn next_entry(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry
#[cfg(test)] mod tests { use super::*; use solana_sdk::{ hash::{hash, Hash}, pubkey::Pubkey, signature::{Keypair, Signer}, system_transaction, }; use solana_perf::test_tx::{test_invalid_tx, test_tx}; use solana_sdk::transaction::{ Result, SanitizedTransaction, TransactionError, VersionedTransaction, }; #[test] fn test_entry_verify() { let zero = Hash::default(); let one = hash(zero.as_ref()); assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case, never used assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad } fn test_verify_transactions( entries: Vec<Entry>, skip_verification: bool, verify_recyclers: VerifyRecyclers, verify: Arc< dyn Fn( VersionedTransaction, TransactionVerificationMode, ) -> Result<SanitizedTransaction> + Send + Sync, >, ) -> bool { let verify_func = { let verify = verify.clone(); let verification_mode = if skip_verification { TransactionVerificationMode::HashOnly } else { TransactionVerificationMode::FullVerification }; move |versioned_tx: VersionedTransaction| -> Result<SanitizedTransaction> { verify(versioned_tx, verification_mode) } }; let cpu_verify_result = verify_transactions(entries.clone(), Arc::new(verify_func)); let mut gpu_verify_result: EntrySigVerificationState = { let verify_result = start_verify_transactions(entries, skip_verification, verify_recyclers, verify); match verify_result { Ok(res) => res, _ => EntrySigVerificationState { verification_status: EntryVerificationStatus::Failure, entries: None, device_verification_data: DeviceSigVerificationData::Cpu(), gpu_verify_duration_us: 0, }, } }; match cpu_verify_result { Ok(_) => { assert!(gpu_verify_result.verification_status != EntryVerificationStatus::Failure); assert!(gpu_verify_result.finish_verify()); true } _ => { assert!( gpu_verify_result.verification_status == EntryVerificationStatus::Failure || !gpu_verify_result.finish_verify() ); false } } } #[test] fn test_entry_gpu_verify() { let verify_transaction = { move |versioned_tx: VersionedTransaction, verification_mode: TransactionVerificationMode| -> Result<SanitizedTransaction> { let sanitized_tx = { let message_hash = if verification_mode == TransactionVerificationMode::FullVerification { versioned_tx.verify_and_hash_message()? 
} else { versioned_tx.message.hash() }; SanitizedTransaction::try_create(versioned_tx, message_hash, None, |_| { Err(TransactionError::UnsupportedVersion) }) }?; Ok(sanitized_tx) } }; let recycler = VerifyRecyclers::default(); // Make sure we test with a number of transactions that's not a multiple of PACKETS_PER_BATCH let entries_invalid = (0..1025) .map(|_| { let transaction = test_invalid_tx(); next_entry_mut(&mut Hash::default(), 0, vec![transaction]) }) .collect::<Vec<_>>(); let entries_valid = (0..1025) .map(|_| { let transaction = test_tx(); next_entry_mut(&mut Hash::default(), 0, vec![transaction]) }) .collect::<Vec<_>>(); assert!(!test_verify_transactions( entries_invalid, false, recycler.clone(), Arc::new(verify_transaction) )); assert!(test_verify_transactions( entries_valid, false, recycler, Arc::new(verify_transaction) )); } #[test] fn test_transaction_reorder_attack() { let zero = Hash::default(); // First, verify entries let keypair = Keypair::new(); let tx0 = system_transaction::transfer(&keypair, &keypair.pubkey(), 0, zero); let tx1 = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, zero); let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]); assert!(e0.verify(&zero)); // Next, swap two transactions and ensure verification fails. e0.transactions[0] = tx1.into(); // <-- attack e0.transactions[1] = tx0.into(); assert!(!e0.verify(&zero)); } #[test] fn test_transaction_signing() { use solana_sdk::signature::Signature; let zero = Hash::default(); let keypair = Keypair::new(); let tx0 = system_transaction::transfer(&keypair, &keypair.pubkey(), 0, zero); let tx1 = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, zero); // Verify entry with 2 transactions let mut e0 = vec![Entry::new(&zero, 0, vec![tx0, tx1])]; assert!(e0.verify(&zero)); // Clear signature of the first transaction, see that it does not verify let orig_sig = e0[0].transactions[0].signatures[0]; e0[0].transactions[0].signatures[0] = Signature::default(); assert!(!e0.verify(&zero)); // restore original signature e0[0].transactions[0].signatures[0] = orig_sig; assert!(e0.verify(&zero)); // Resize signatures and see verification fails. 
let len = e0[0].transactions[0].signatures.len(); e0[0].transactions[0] .signatures .resize(len - 1, Signature::default()); assert!(!e0.verify(&zero)); // Pass an entry with no transactions let e0 = vec![Entry::new(&zero, 0, vec![])]; assert!(e0.verify(&zero)); } #[test] fn test_next_entry() { let zero = Hash::default(); let tick = next_entry(&zero, 1, vec![]); assert_eq!(tick.num_hashes, 1); assert_ne!(tick.hash, zero); let tick = next_entry(&zero, 0, vec![]); assert_eq!(tick.num_hashes, 0); assert_eq!(tick.hash, zero); let keypair = Keypair::new(); let tx0 = system_transaction::transfer(&keypair, &Pubkey::new_unique(), 42, zero); let entry0 = next_entry(&zero, 1, vec![tx0.clone()]); assert_eq!(entry0.num_hashes, 1); assert_eq!(entry0.hash, next_hash(&zero, 1, &[tx0.into()])); } #[test] #[should_panic] fn test_next_entry_panic() { let zero = Hash::default(); let keypair = Keypair::new(); let tx = system_transaction::transfer(&keypair, &keypair.pubkey(), 0, zero); next_entry(&zero, 0, vec![tx]); } #[test] fn test_verify_slice1() { solana_logger::setup(); let zero = Hash::default(); let one = hash(zero.as_ref()); assert!(vec![][..].verify(&zero)); // base case assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1 assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2]; bad_ticks[1].hash = one; assert!(!bad_ticks.verify(&zero)); // inductive step, bad } #[test] fn test_verify_slice_with_hashes1() { solana_logger::setup(); let zero = Hash::default(); let one = hash(zero.as_ref()); let two = hash(one.as_ref()); assert!(vec![][..].verify(&one)); // base case assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1 assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad let mut ticks = vec![next_entry(&one, 1, vec![])]; ticks.push(next_entry(&ticks.last().unwrap().hash, 1, vec![])); assert!(ticks.verify(&one)); // inductive step let mut bad_ticks = vec![next_entry(&one, 1, vec![])]; bad_ticks.push(next_entry(&bad_ticks.last().unwrap().hash, 1, vec![])); bad_ticks[1].hash = one; assert!(!bad_ticks.verify(&one)); // inductive step, bad } #[test] fn test_verify_slice_with_hashes_and_transactions() { solana_logger::setup(); let zero = Hash::default(); let one = hash(zero.as_ref()); let two = hash(one.as_ref()); let alice_keypair = Keypair::new(); let bob_keypair = Keypair::new(); let tx0 = system_transaction::transfer(&alice_keypair, &bob_keypair.pubkey(), 1, one); let tx1 = system_transaction::transfer(&bob_keypair, &alice_keypair.pubkey(), 1, one); assert!(vec![][..].verify(&one)); // base case assert!(vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&one)); // singleton case 1 assert!(!vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&two)); // singleton case 2, bad let mut ticks = vec![next_entry(&one, 1, vec![tx0.clone()])]; ticks.push(next_entry( &ticks.last().unwrap().hash, 1, vec![tx1.clone()], )); assert!(ticks.verify(&one)); // inductive step let mut bad_ticks = vec![next_entry(&one, 1, vec![tx0])]; bad_ticks.push(next_entry(&bad_ticks.last().unwrap().hash, 1, vec![tx1])); bad_ticks[1].hash = one; assert!(!bad_ticks.verify(&one)); // inductive step, bad } #[test] fn test_verify_tick_hash_count() { let hashes_per_tick = 10; let tx = VersionedTransaction::default(); let no_hash_tx_entry = Entry { transactions: vec![tx.clone()], 
..Entry::default() }; let single_hash_tx_entry = Entry { transactions: vec![tx.clone()], num_hashes: 1, ..Entry::default() }; let partial_tx_entry = Entry { num_hashes: hashes_per_tick - 1, transactions: vec![tx.clone()], ..Entry::default() }; let full_tx_entry = Entry { num_hashes: hashes_per_tick, transactions: vec![tx.clone()], ..Entry::default() }; let max_hash_tx_entry = Entry { transactions: vec![tx], num_hashes: u64::MAX, ..Entry::default() }; let no_hash_tick_entry = Entry::new_tick(0, &Hash::default()); let single_hash_tick_entry = Entry::new_tick(1, &Hash::default()); let partial_tick_entry = Entry::new_tick(hashes_per_tick - 1, &Hash::default()); let full_tick_entry = Entry::new_tick(hashes_per_tick, &Hash::default()); let max_hash_tick_entry = Entry::new_tick(u64::MAX, &Hash::default()); // empty batch should succeed if hashes_per_tick hasn't been reached let mut tick_hash_count = 0; let mut entries = vec![]; assert!(entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, 0); // empty batch should fail if hashes_per_tick has been reached tick_hash_count = hashes_per_tick; assert!(!entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, hashes_per_tick); tick_hash_count = 0; // validation is disabled when hashes_per_tick == 0 entries = vec![max_hash_tx_entry.clone()]; assert!(entries.verify_tick_hash_count(&mut tick_hash_count, 0)); assert_eq!(tick_hash_count, 0); // partial tick should fail entries = vec![partial_tick_entry.clone()]; assert!(!entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, hashes_per_tick - 1); tick_hash_count = 0; // full tick entry should succeed entries = vec![no_hash_tx_entry, full_tick_entry.clone()]; assert!(entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, 0); // oversized tick entry should fail assert!(!entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick - 1)); assert_eq!(tick_hash_count, hashes_per_tick); tick_hash_count = 0; // partial tx entry without tick entry should succeed entries = vec![partial_tx_entry]; assert!(entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, hashes_per_tick - 1); tick_hash_count = 0; // full tx entry with tick entry should succeed entries = vec![full_tx_entry.clone(), no_hash_tick_entry]; assert!(entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, 0); // full tx entry with oversized tick entry should fail entries = vec![full_tx_entry.clone(), single_hash_tick_entry.clone()]; assert!(!entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, hashes_per_tick + 1); tick_hash_count = 0; // full tx entry without tick entry should fail entries = vec![full_tx_entry]; assert!(!entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, hashes_per_tick); tick_hash_count = 0; // tx entry and a tick should succeed entries = vec![single_hash_tx_entry.clone(), partial_tick_entry]; assert!(entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, 0); // many tx entries and a tick should succeed let tx_entries: Vec<Entry> = (0..hashes_per_tick - 1) .map(|_| single_hash_tx_entry.clone()) .collect(); entries = [tx_entries, vec![single_hash_tick_entry]].concat(); assert!(entries.verify_tick_hash_count(&mut 
tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, 0); // check overflow saturation should fail entries = vec![full_tick_entry.clone(), max_hash_tick_entry]; assert!(!entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, u64::MAX); tick_hash_count = 0; // check overflow saturation should fail entries = vec![max_hash_tx_entry, full_tick_entry]; assert!(!entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick)); assert_eq!(tick_hash_count, u64::MAX); } #[test] fn test_poh_verify_fuzz() { solana_logger::setup(); for _ in 0..100 { let mut time = Measure::start("ticks"); let num_ticks = thread_rng().gen_range(1, 100); info!("create {} ticks:", num_ticks); let mut entries = create_random_ticks(num_ticks, 100, Hash::default()); time.stop(); let mut modified = false; if thread_rng().gen_ratio(1, 2) { modified = true; let modify_idx = thread_rng().gen_range(0, num_ticks) as usize; entries[modify_idx].hash = hash(&[1, 2, 3]); } info!("done.. {}", time); let mut time = Measure::start("poh"); let res = entries.verify(&Hash::default()); assert_eq!(res, !modified); time.stop(); info!("{} {}", time, res); } } }
{ assert!(num_hashes > 0 || transactions.is_empty()); let transactions = transactions.into_iter().map(Into::into).collect::<Vec<_>>(); Entry { num_hashes, hash: next_hash(prev_hash, num_hashes, &transactions), transactions, } }
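// Illustrative usage sketch (added for exposition; not part of the original
// source): shows how `next_entry_mut` chains entries so that the resulting
// slice verifies against the starting hash via the `EntrySlice` trait above.
// The module/test names and tick counts are arbitrary.
#[cfg(test)]
mod next_entry_usage_sketch {
    use super::*;
    use solana_sdk::hash::Hash;

    #[test]
    fn chains_ticks_and_verifies_slice() {
        let start = Hash::default();
        let mut hash = start;
        // Three ticks of 4 hashes each; each call advances `hash` to the new tip.
        let ticks: Vec<Entry> = (0..3).map(|_| next_entry_mut(&mut hash, 4, vec![])).collect();
        assert!(ticks.verify(&start));
    }
}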
_percentile_source_target_operations.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError

from .. import models


class PercentileSourceTargetOperations(object):
    """PercentileSourceTargetOperations operations.

    You should not instantiate this class directly; instead, create a Client
    instance that will create it for you and attach it as an attribute.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for this operation. Constant value: "2020-03-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):

        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2020-03-01"

        self.config = config

    def list_metrics(
            self, resource_group_name, account_name, source_region, target_region, filter, custom_headers=None, raw=False, **operation_config):
        """Retrieves the metrics determined by the given filter for the given
        account, source and target region. This URL is only for PBS and
        Replication Latency data.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param source_region: Source region from which data is written.
         Cosmos DB region, with spaces between words and each word
         capitalized.
        :type source_region: str
        :param target_region: Target region to which data is written. Cosmos
         DB region, with spaces between words and each word capitalized.
        :type target_region: str
        :param filter: An OData filter expression that describes a subset of
         metrics to return. The parameters that can be filtered are
         name.value (name of the metric, can have an or of multiple names),
         startTime, endTime, and timeGrain. The supported operator is eq.
        :type filter: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PercentileMetric :rtype: ~azure.mgmt.cosmosdb.models.PercentileMetricPaged[~azure.mgmt.cosmosdb.models.PercentileMetric] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_metrics.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'), 'sourceRegion': self._serialize.url("source_region", source_region, 'str'), 'targetRegion': self._serialize.url("target_region", target_region, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str', min_length=1) query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def
(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.PercentileMetricPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sourceRegion/{sourceRegion}/targetRegion/{targetRegion}/percentile/metrics'}
internal_paging
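# Illustrative usage sketch (not part of the generated client): how a caller
# would typically reach this operations group and consume the paged result.
# The client construction, region names, and filter string are assumptions.
#
# from azure.mgmt.cosmosdb import CosmosDBManagementClient
#
# client = CosmosDBManagementClient(credentials, subscription_id)
# metrics = client.percentile_source_target.list_metrics(
#     resource_group_name="my-rg",
#     account_name="mycosmosaccount",
#     source_region="West US",
#     target_region="East US",
#     filter="name.value eq 'Probabilistic Bounded Staleness'",
# )
# for metric in metrics:  # PercentileMetricPaged is iterable
#     print(metric.name.value, metric.unit)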
ants.py
""" Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator. """ import attr from nipype.interfaces.base import ( Directory, File, InputMultiPath, OutputMultiPath, traits, ) from pydra import ShellCommandTask from pydra.engine.specs import ( SpecInfo, ShellSpec, MultiInputFile, MultiOutputFile, MultiInputObj, ) import pydra class ANTSRegistration: def __init__(self, name="BRAINSResample", executable="BRAINSResample"): self.name = name self.executable = executable """ title: Resample Image (BRAINS) category: Registration description: This program collects together three common image processing tasks that all involve resampling an image volume: Resampling to a new resolution and spacing, applying a transformation (using an ITK transform IO mechanisms) and Warping (using a vector image deformation field). Full documentation available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample. version: 5.2.0 documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. """ def
(self): input_fields = [ ( "verbose", attr.ib( type=str, metadata={ "argstr": "--verbose ", "help_string": "", }, ), ), ( "collapse_output_transforms", attr.ib( type=int, metadata={ "argstr": "--collapse-output-transforms ", "help_string": "", }, ), ), ( "dimensionality", attr.ib( type=int, metadata={ "argstr": "--dimensionality ", "help_string": "", }, ), ), ( "float", attr.ib( type=int, metadata={ "argstr": "--float ", "help_string": "", }, ), ), ( "initial_moving_transform", attr.ib( type=MultiInputObj, metadata={ "argstr": "--initial-moving-transform ", "sep": ",", "help_string": "", }, ), ), ( "initialize_transforms_per_stage", attr.ib( type=int, metadata={ "argstr": "--initialize-transforms-per-stage ", "help_string": "", }, ), ), ( "interpolation", attr.ib( type=str, metadata={ "argstr": "--interpolation ", "help_string": "", }, ), ), ( "output", attr.ib( type=MultiInputFile, metadata={ "argstr": "--output ", "sep": ", ", "help_string": "", }, ), ), ( "transform1", attr.ib( type=str, metadata={ "argstr": "--transform ", "help_string": "", }, ), ), ( "metric1", attr.ib( type=str, metadata={ "argstr": "--metric ", "sep": ",", "help_string": "", }, ), ), ( "convergence1", attr.ib( type=list, metadata={ "argstr": "--convergence ", "sep": ",", "help_string": "", }, ), ), ( "smoothing_sigmas1", attr.ib( type=str, metadata={ "argstr": "--smoothing-sigmas ", "help_string": "", }, ), ), ( "shrink_factors1", attr.ib( type=str, metadata={ "argstr": "--shrink-factors ", "help_string": "", }, ), ), ( "use_estimate_learning_rate_once1", attr.ib( type=int, metadata={ "argstr": "--use-estimate-learning-rate-once ", "help_string": "", }, ), ), ( "use_histogram_matching1", attr.ib( type=int, metadata={ "argstr": "--use-histogram-matching ", "help_string": "", }, ), ), ( "masks1", attr.ib( type=MultiInputFile, metadata={ "argstr": "--masks ", "sep": ",", "help_string": "", }, ), ), ( "transform2", attr.ib( type=str, metadata={ "argstr": "--transform ", "help_string": "", }, ), ), ( "metric2", attr.ib( type=str, metadata={ "argstr": "--metric ", "sep": ",", "help_string": "", }, ), ), ( "convergence2", attr.ib( type=list, metadata={ "argstr": "--convergence ", "sep": ",", "help_string": "", }, ), ), ( "smoothing_sigmas2", attr.ib( type=str, metadata={ "argstr": "--smoothing-sigmas ", "help_string": "", }, ), ), ( "shrink_factors2", attr.ib( type=str, metadata={ "argstr": "--shrink-factors ", "help_string": "", }, ), ), ( "use_estimate_learning_rate_once2", attr.ib( type=int, metadata={ "argstr": "--use-estimate-learning-rate-once ", "help_string": "", }, ), ), ( "use_histogram_matching2", attr.ib( type=int, metadata={ "argstr": "--use-histogram-matching ", "help_string": "", }, ), ), ( "masks2", attr.ib( type=MultiInputFile, metadata={ "argstr": "--masks ", "sep": ",", "help_string": "", }, ), ), ( "transform3", attr.ib( type=str, metadata={ "argstr": "--transform ", "help_string": "", }, ), ), ( "metric3", attr.ib( type=str, metadata={ "argstr": "--metric ", "sep": ",", "help_string": "", }, ), ), ( "convergence3", attr.ib( type=list, metadata={ "argstr": "--convergence ", "sep": ",", "help_string": "", }, ), ), ( "smoothing_sigmas3", attr.ib( type=str, metadata={ "argstr": "--smoothing-sigmas ", "help_string": "", }, ), ), ( "shrink_factors3", attr.ib( type=str, metadata={ "argstr": "--shrink-factors ", "help_string": "", }, ), ), ( "use_estimate_learning_rate_once3", attr.ib( type=int, metadata={ "argstr": "--use-estimate-learning-rate-once ", "help_string": "", }, ), ), ( "use_histogram_matching3", 
attr.ib( type=int, metadata={ "argstr": "--use-histogram-matching ", "help_string": "", }, ), ), ( "masks3", attr.ib( type=MultiInputFile, metadata={ "argstr": "--masks ", "sep": ",", "help_string": "", }, ), ), ( "metric", attr.ib( type=list, metadata={ "argstr": "--metric ", "sep": ",", "help_string": "", }, ), ), ( "smoothing_sigmas", attr.ib( type=str, metadata={ "argstr": "--smoothing-sigmas ", "help_string": "", }, ), ), ( "shrink_factors", attr.ib( type=str, metadata={ "argstr": "--shrink-factors ", "help_string": "", }, ), ), ( "use_estimate_learning_rate_once", attr.ib( type=int, metadata={ "argstr": "--use-estimate-learning-rate-once ", "help_string": "", }, ), ), ( "use_histogram_matching", attr.ib( type=int, metadata={ "argstr": "--use-histogram-matching ", "help_string": "", }, ), ), ( "winsorize_image_intensities", attr.ib( type=list, metadata={ "argstr": "--winsorize-image-intensities ", "sep": ",", "help_string": "", }, ), ), ( "write_composite_transform", attr.ib( type=int, metadata={ "argstr": "--write-composite-transform ", "help_string": "", }, ), ), ] output_fields = [ ( "output", attr.ib( type=MultiOutputFile, metadata={ "output_file_template": "{output}", "help_string": "Resulting deformed image", }, ), ), ] input_spec = SpecInfo(name="Input", fields=input_fields, bases=(ShellSpec,)) output_spec = SpecInfo( name="Output", fields=output_fields, bases=(pydra.specs.ShellOutSpec,) ) task = ShellCommandTask( name=self.name, executable=self.executable, input_spec=input_spec, output_spec=output_spec, ) return task class ANTSJointFusion: def __init__(self, name="BRAINSResample", executable="BRAINSResample"): self.name = name self.executable = executable """ title: Resample Image (BRAINS) category: Registration description: This program collects together three common image processing tasks that all involve resampling an image volume: Resampling to a new resolution and spacing, applying a transformation (using an ITK transform IO mechanisms) and Warping (using a vector image deformation field). Full documentation available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample. version: 5.2.0 documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. """ def get_task(self): input_fields = [] output_fields = [] input_spec = SpecInfo(name="Input", fields=input_fields, bases=(ShellSpec,)) output_spec = SpecInfo( name="Output", fields=output_fields, bases=(pydra.specs.ShellOutSpec,) ) task = ShellCommandTask( name=self.name, executable=self.executable, input_spec=input_spec, output_spec=output_spec, ) return task class ANTSApplyTransform: def __init__(self, name="BRAINSResample", executable="BRAINSResample"): self.name = name self.executable = executable """ title: Resample Image (BRAINS) category: Registration description: This program collects together three common image processing tasks that all involve resampling an image volume: Resampling to a new resolution and spacing, applying a transformation (using an ITK transform IO mechanisms) and Warping (using a vector image deformation field). 
Full documentation available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample. version: 5.2.0 documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. """ def get_task(self): input_fields = [] output_fields = [] input_spec = SpecInfo(name="Input", fields=input_fields, bases=(ShellSpec,)) output_spec = SpecInfo( name="Output", fields=output_fields, bases=(pydra.specs.ShellOutSpec,) ) task = ShellCommandTask( name=self.name, executable=self.executable, input_spec=input_spec, output_spec=output_spec, ) return task
get_task
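# Illustrative usage sketch (not part of the generated module): building and
# configuring the pydra ShellCommandTask returned by ANTSRegistration.get_task().
# The executable name and the input values below are assumptions.
#
# reg = ANTSRegistration(name="ants_registration", executable="antsRegistration")
# task = reg.get_task()
# task.inputs.dimensionality = 3
# task.inputs.interpolation = "Linear"
# result = task()  # shells out to: antsRegistration --dimensionality 3 --interpolation Linear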
utils.py
import warnings
import numpy as np
import cmath as math
import scipy as scp
import scipy.optimize as opt
from types import FunctionType

def parse_multidatasets(V,K,weights,precondition=False):
#===============================================================================

    # Identify if the signals have already been processed by this function
    if type(V) is not list:
        if V.size == np.atleast_1d(weights).size:
            # If so, just return without doing anything
            if precondition:
                return V,K,weights,[np.arange(0,len(V))],[1]
            else:
                return V,K,weights,[np.arange(0,len(V))]

    # If multiple signals are specified as a list...
    if type(V) is list and all([type(Vs) is np.ndarray for Vs in V]):
        nSignals = len(V)
        prescales = np.zeros(nSignals)
        Vlist = []
        # Pre-scale the signals, important for fitregmodel when using global fits with arbitrary scales
        for i in range(nSignals):
            if precondition:
                prescales[i] = max(V[i])
                Vlist.append(V[i]/prescales[i])
            else:
                Vlist.append(V[i])
        V = np.concatenate(Vlist, axis=0) # ...concatenate them along the list
    elif type(V) is np.ndarray:
        nSignals = 1
        prescales = [1]
        Vlist = [V]
    else:
        raise TypeError('The input signal(s) must be a numpy array or a list of numpy arrays.')

    def prepareKernel(K,nSignals):
        # If multiple kernels are specified as a list...
        if type(K) is tuple:
            K = [Ks for Ks in K]
        if type(K) is list and all([type(Ks) is np.ndarray for Ks in K]):
            nKernels = len(K)
            K = np.concatenate(K, axis=0) # ...concatenate them along the list
        elif type(K) is np.ndarray:
            nKernels = 1
        else:
            raise TypeError('The input kernel(s) must be a numpy array or a list of numpy arrays.')
        # Check that the same number of signals and kernels have been passed
        if nSignals!=nKernels:
            raise KeyError('The same number of kernels and signals must be specified as lists.')
        return K

    if type(K) is FunctionType:
        Kmulti = lambda p: prepareKernel(K(p),nSignals)
    else:
        Kmulti = prepareKernel(K,nSignals)

    # If multiple weights are specified as a list...
    if type(weights) is list or not hasattr(weights, "__len__"):
        weights = np.atleast_1d(weights)
        if len(weights)==1:
            weights = np.repeat(weights,nSignals)
        weights = weights/sum(weights)
        if len(weights)!=nSignals:
            raise KeyError('If multiple signals are passed, the same number of weights is required.')
        weights_ = []
        for i in range(len(weights)):
            weights_ = np.concatenate((weights_,weights[i]*np.ones(len(Vlist[i]))))
        weights = weights_
    else:
        raise TypeError('The input weight(s) must be a numpy array or a list of numpy arrays.')

    # Get the indices to extract the subsets again
    Ns = [len(V) for V in Vlist]
    subset = [None]*nSignals
    for i in range(nSignals):
        if i==0:
            prev = 0
        else:
            prev = subset[i-1][-1]+1
        subset[i] = np.arange(prev,prev+Ns[i])

    if precondition:
        return V,Kmulti,weights,subset,prescales
    else:
        return V,Kmulti,weights,subset
#===============================================================================

def hccm(J,*args):
    """
    Heteroscedasticity Consistent Covariance Matrix (HCCM)
    ======================================================

    Computes the heteroscedasticity consistent covariance matrix (HCCM) of
    a given LSQ problem given by the Jacobian matrix (J) and the covariance
    matrix of the data (V). If the residual (res) is specified, the
    covariance matrix is estimated using one of the methods specified in
    (mode). The HCCM is valid for both heteroscedastic and homoscedastic
    residual vectors.
Usage: ------ C = hccm(J,V) C = hccm(J,res,mode) Arguments: ---------- J (NxM-element array) Jacobian matrix of the residual vector res (N-element array) Vector of residuals mode (string) HCCM estimator, options are: 'HC0' - White, H. (1980) 'HC1' - MacKinnon and White, (1985) 'HC2' - MacKinnon and White, (1985) 'HC3' - Davidson and MacKinnon, (1993) 'HC4' - Cribari-Neto, (2004) 'HC5' - Cribari-Neto, (2007) Returns: -------- C (MxM-element array) Heteroscedasticity consistent covariance matrix References: ------------ [1] White, H. (1980). A heteroskedasticity-consistent covariance matrix estimator and a direct test for heteroskedasticity. Econometrica, 48(4), 817-838 DOI: 10.2307/1912934 [2] MacKinnon and White, (1985). Some heteroskedasticity-consistent covariance matrix estimators with improved finite sample properties. Journal of Econometrics, 29 (1985), pp. 305-325. DOI: 10.1016/0304-4076(85)90158-7 [3] Davidson and MacKinnon, (1993). Estimation and Inference in Econometrics Oxford University Press, New York. [4] Cribari-Neto, F. (2004). Asymptotic inference under heteroskedasticity of unknown form. Computational Statistics & Data Analysis, 45(1), 215-233 DOI: 10.1016/s0167-9473(02)00366-3 [5] Cribari-Neto, F., Souza, T. C., & Vasconcellos, K. L. P. (2007). Inference under heteroskedasticity and leveraged data. Communications in Statistics – Theory and Methods, 36(10), 1877-1888. DOI: 10.1080/03610920601126589 """ # Unpack inputs if len(args)==2: res,mode = args V = [] elif len(args)==1: V = args[0] # Hat matrix H = [email protected](J.T@J)@J.T # Get leverage h = np.diag(H) # Number of parameters (k) & Number of variables (n) n,k = np.shape(J) if isempty(V): # Select estimation method using established nomenclature if mode.upper() == 'HC0': # White,(1980),[1] # Estimate the data covariance matrix V = np.diag(res**2) elif mode.upper() == 'HC1': # MacKinnon and White,(1985),[2] # Estimate the data covariance matrix V = n/(n-k)*np.diag(res**2) elif mode.upper() == 'HC2': # MacKinnon and White,(1985),[2] # Estimate the data covariance matrix V = np.diag(res**2/(1-h)) elif mode.upper() == 'HC3': # Davidson and MacKinnon,(1993),[3] # Estimate the data covariance matrix V = np.diag(res/(1-h))**2 elif mode.upper() == 'HC4': # Cribari-Neto,(2004),[4] # Compute discount factor delta = np.minimum(4,n*h/k) # Estimate the data covariance matrix V = np.diag(res**2./((1 - h)**delta)) elif mode.upper() == 'HC5': # Cribari-Neto,(2007),[5] # Compute inflation factor k = 0.7 alpha = np.minimum(np.maximum(4,k*max(h)/np.mean(h)),h/np.mean(h)) # Estimate the data covariance matrix V = np.diag(res**2./(np.sqrt((1 - h)**alpha))) else: raise KeyError('HCCM estimation mode not found.') # Heteroscedasticity Consistent Covariance Matrix (HCCM) estimator C = np.linalg.pinv(J.T@J)@J.T@V@[email protected](J.T@J) return C #=============================================================================== # ================================================================= def metadata(**kwargs): """ Decorator: Set model metadata as function attributes """ attributes = list(kwargs.keys()) metadata = list(kwargs.values()) def _setmetadata(func): for attribute,data in zip(attributes,metadata): setattr(func,attribute,data) return func return _setmetadata # ================================================================= def gsvd(A,B): #=============================================================================== m,p = A.shape n = B.shape[0] # Economy-sized. 
useQA = m > p useQB = n > p if useQA: QA,A = scp.linalg.qr(A) A = A[0:p,0:p] QA = QA[:,0:p] m = p if useQB: QB,B = scp.linalg.qr(B) B = B[0:p,0:p] QB = QB[:,0:p] n = p Q,_ = np.linalg.qr(np.vstack((A,B)), mode='reduced') Q1 = Q[0:m,0:p] Q2 = Q[m:m+n,0:p] C,S = csd(Q1,Q2) # Vector of generalized singular values. q = min(m+n,p) # Supress divide by 0 warning with warnings.catch_warnings(): warnings.simplefilter('ignore') U = np.vstack((np.zeros((q-m,1),'double'), np.diag(C,max(0,q-m)).reshape(len(np.diag(C,max(0,q-m))),1)))/np.vstack((np.diag(S,0).reshape(len(np.diag(S,0)),1), np.zeros((q-n,1),'double') )) return U #=============================================================================== def csd(Q1,Q2): #=============================================================================== """ Cosine-Sine Decomposition ------------------------- Given Q1 and Q2 such that Q1'* Q1 + Q2'* Q2 = I, the C-S Decomposition is a joint factorization of the form Q1 = U1*C*V' and Q2=U2*S*V' where U1,U2,V are orthogonal matrices and C and S are diagonal matrices (not necessarily square) satisfying C'* C + S'* S = I The diagonal entries of C and S are nonnegative and the diagonal elements of C are in nondecreasing order. The matrix Q1 cannot have more columns than rows. Based on the Octave code by Artiste (submitted by S.J.Leon): http://www.ar-tiste.com/m-fun/m-fun-index.html """ m,n = Q1.shape p,_ = Q2.shape if m < p: s,c = csd(Q2,Q1) j = np.flip(np.arange(n)) c = c[:,j] s = s[:,j] m = np.minimum(m,p) i = np.flip(np.arange(m)) c[np.arange(m),:] = c[i,:] n = np.minimum(n,p) i = np.flip(np.arange(n)) s[np.arange(n),:] = s[i,:] return c,s _,sdiag,v = np.linalg.svd(Q1) c = np.zeros((m, n)) np.fill_diagonal(c, sdiag) v = v.T.conj() z = np.eye(n,n) z = scp.linalg.hankel(z[:,n-1]) c[0:n,:] = z@c[0:n,:]@z v = v@z Q2 = Q2@v k=0 for j in range(1,n): if c[j,j] <= 1/np.sqrt(2): k=
b = Q2[:,0:k] u2,r = np.linalg.qr(b,mode='complete') s = u2.T@Q2 t = np.minimum(p,n) tt = np.minimum(m,p) if k<t: r2 = s[np.ix_(range(k,p),range(k,t))] _,sdiag,vt = np.linalg.svd(r2) ss= np.zeros(r2.shape) np.fill_diagonal(ss, sdiag) vt = vt.T.conj() s[k:p,k:t] = ss c[:,k:t] = c[:,k:t]@vt w = c[k:tt,k:t] z,r = np.linalg.qr(w,mode='complete') c[k:tt,k:t] = r for j in range(n): if c[j,j]<0: c[j,j] = -c[j,j] for j in range(t): if s[j,j]<0: s[j,j] = -s[j,j] return c,s #=============================================================================== #=============================================================================== def diagf(X): """ Diagonal force X = diagf(X) zeros all the elements off the main diagonal of X. """ X = np.triu(np.tril(X)) return X #=============================================================================== #=============================================================================== def diagp(Y,X,k): """ DIAGP Diagonal positive. Y,X = diagp(Y,X,k) scales the columns of Y and the rows of X by unimodular factors to make the k-th diagonal of X real and positive. """ D = np.diag(X,k) j = np.where((D.real < 0) | (D.imag != 0)) D = np.diag(np.conj(D[j])/abs(D[j])) Y[:,j] = Y[:,j]@D.T X[j,:] = D@X[j,:] X = X+0 # use "+0" to set possible -0 elements to 0 return Y,X #=============================================================================== #=============================================================================== def Jacobian(fcn, x0, lb, ub): """ Finite difference Jacobian estimation Estimates the Jacobian matrix of a vector-valued function ``fcn`` at the point ``x0`` taking into consideration box-constraints defined by the lower and upper bounds ``lb`` and ``ub``. This is a wrapper around the ``scipy.optimize._numdiff.approx_derivative`` function. """ J = opt._numdiff.approx_derivative(fcn,x0,method='2-point',bounds=(lb,ub)) J = np.atleast_2d(J) return J #=============================================================================== #=============================================================================== def movmean(x, N): """ Moving mean =========== Returns an array of local N-point mean values, where each mean is calculated over a sliding window of length k across neighboring elements of x. Usage: ------ xfilt = movmean(x,N) Arguments: ---------- x (array) Array to be filtered N (scalar) Window size Returns: -------- xfilt (array) Filtered array """ xfilt = np.convolve(x, np.ones(N)/N, mode='same') return xfilt #=============================================================================== #=============================================================================== def ovl(A,B): """ Overlap metric ============== Returns the overlap between two vectors A and B. 
    Usage:
    ------
        metric = ovl(A,B)

    Arguments:
    ----------
    A (N-element array)
        First vector
    B (N-element array)
        Second vector

    Returns:
    --------
    metric (scalar)
        Overlap metric

    """
    A /= np.sum(A)
    B /= np.sum(B)
    metric = np.sum(np.minimum(A,B))
    return metric
#===============================================================================

def isempty(A):
#===============================================================================
    A = np.atleast_1d(A)
    boolean = np.size(A)==0
    return boolean
#===============================================================================

def multistarts(n,x0,lb,ub):
#===============================================================================

    if n<1:
        raise ValueError('The number of requested starting points must be n>0.')

    if len(x0) != len(lb) or len(x0) != len(ub):
        raise ValueError('The lower/upper bound size(s) are not compatible with the initial guess vector x0.')

    # Generate n-1 new starting points within the bounds
    if n>1:
        x0 = np.linspace(lb,ub,n-1)
    else:
        x0 = [x0]
    return x0
#===============================================================================
j
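# Illustrative usage sketch (not part of the module): exercising the smoothing
# and multistart helpers defined above on synthetic data; values are arbitrary.
#
# rng = np.random.default_rng(0)
# noisy = np.sin(np.linspace(0, 6, 200)) + 0.1*rng.standard_normal(200)
# smooth = movmean(noisy, 11)  # 11-point moving average
# x0s = multistarts(5, x0=[0.5], lb=[0.0], ub=[1.0])  # n-1 = 4 linspaced starts within bounds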