file_name (3–137 chars) | prefix (0–918k chars) | suffix (0–962k chars) | middle (0–812k chars)
---|---|---|---
monitor.py | import GPUtil
from threading import Thread
import time
class GPUMonitor(Thread):
def __init__(self, delay):
super().__init__()
self.stopped = False
self.delay = delay
self.data = {
g.id: dict(
load=[],
memory=[],
temperature=[]
)
for g in GPUtil.getGPUs()
}
def run(self):
while not self.stopped:
for g in GPUtil.getGPUs():
data = self.data[g.id]
data["load"].append(g.load)
data["memory"].append(g.memoryUsed)
data["temperature"].append(g.temperature)
time.sleep(self.delay)
def stop(self):
| self.stopped = True |
|
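A minimal usage sketch for the `GPUMonitor` thread above, assuming GPUtil is installed, at least one GPU is visible, and the class is importable from `monitor.py`; the `time.sleep` is a stand-in for a real workload:

```python
import time
from monitor import GPUMonitor  # hypothetical import path for the class above

monitor = GPUMonitor(delay=1.0)  # sample GPU stats once per second
monitor.start()
try:
    time.sleep(10)  # placeholder for a real workload, e.g. a training loop
finally:
    monitor.stop()
    monitor.join()  # wait for the sampling loop to exit

# Summarise the collected samples per GPU.
for gpu_id, series in monitor.data.items():
    if series["load"]:
        mean_load = sum(series["load"]) / len(series["load"])
        print(f"GPU {gpu_id}: mean load {mean_load:.0%}, "
              f"peak memory {max(series['memory'])} MB")
```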
meta-revision-ok.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Meta test for compiletest: check that when we give the right error
// patterns, the test passes. See also `meta-revision-bad.rs`. | //[foo] error-pattern:foo
//[bar] error-pattern:bar
#[cfg(foo)] fn die() {panic!("foo");}
#[cfg(bar)] fn die() {panic!("bar");}
fn main() { die(); } |
// revisions: foo bar |
deps.ts | export { cyan, bold } from "https://deno.land/[email protected]/colors/mod.ts"; |
||
basic_client.go | package ezkube
import (
"context"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
type restClient struct {
mgr manager.Manager
}
var _ RestClient = &restClient{}
// NewRestClient creates the root REST client used to interact
// with Kubernetes. Wraps the controller-runtime client.Client.
// TODO: option to skip cache reads
func NewRestClient(mgr manager.Manager) *restClient {
return &restClient{mgr: mgr}
}
func (c *restClient) Get(ctx context.Context, obj Object) error {
objectKey := client.ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}
return c.mgr.GetClient().Get(ctx, objectKey, obj)
}
func (c *restClient) List(ctx context.Context, obj List, options ...client.ListOption) error {
return c.mgr.GetClient().List(ctx, obj, options...)
}
func (c *restClient) Create(ctx context.Context, obj Object) error { | }
// if one or more reconcile funcs are passed, they will
// be applied to the object before the update is attempted.
func (c *restClient) Update(ctx context.Context, obj Object, reconcileFuncs ...ReconcileFunc) error {
return c.mgr.GetClient().Update(ctx, obj)
}
func (c *restClient) UpdateStatus(ctx context.Context, obj Object) error {
return c.mgr.GetClient().Status().Update(ctx, obj)
}
func (c *restClient) Delete(ctx context.Context, obj Object) error {
return c.mgr.GetClient().Delete(ctx, obj)
}
func (c *restClient) Manager() manager.Manager {
return c.mgr
} | return c.mgr.GetClient().Create(ctx, obj) |
script.rs | use core::num;
use std::{any, fmt::format, io::{BufRead, Cursor, Read, Write}, str::FromStr, usize};
use crate::utils::{from_hex, to_hex};
use anyhow::*;
use byteorder::{LittleEndian, ReadBytesExt};
use num_traits::{FromPrimitive, ToPrimitive};
use serde::*;
use snafu::*;
use wasm_bindgen::{prelude::*, throw_str};
use crate::OpCodes;
#[derive(Debug, Snafu)]
pub enum ScriptErrors {
#[snafu(display("Error deserialising Script: {}", error))]
Deserialise { error: anyhow::Error },
#[snafu(display("Error serialising Script field {}: {}", reason, error))]
Serialise {
reason: String,
error: anyhow::Error,
},
}
#[wasm_bindgen]
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Script(
#[serde(serialize_with = "to_hex", deserialize_with = "from_hex")] pub(crate) Vec<u8>,
);
/**
* Serialise Methods
*/
impl Script {
fn to_asm_string_impl(&self, extended: bool) -> Result<String, ScriptErrors> {
let mut cursor = Cursor::new(self.0.clone());
// Read bytes until end of string
Ok(self.read_opcodes(&mut cursor, String::new(), extended)?)
}
fn read_opcodes(
&self,
cursor: &mut Cursor<Vec<u8>>,
builder_str: String,
extended: bool,
) -> Result<String, ScriptErrors> {
if cursor.position() >= self.0.len() as u64 {
return Ok(builder_str);
}
let mut new_str = builder_str.clone();
if cursor.position() > 0 {
new_str.push_str(" ");
}
let byte = match cursor.read_u8() {
Ok(v) => v,
Err(e) => {
return Err(ScriptErrors::Serialise {
reason: "read_byte".to_string(),
error: anyhow!(e),
})
}
};
if let Some(special_opcode) = Script::get_special_opcode(byte, extended, cursor)? {
new_str.push_str(&special_opcode);
return Script::read_opcodes(&self, cursor, new_str, extended);
}
let opcode_str = match FromPrimitive::from_u8(byte) {
Some(v @ OpCodes::OP_0) => match extended {
true => v.to_string(),
false => {
0.to_string()
},
},
Some(v @ OpCodes::OP_PUSHDATA1) => Script::format_pushdata_string(cursor, v, extended)?,
Some(v @ OpCodes::OP_PUSHDATA2) => Script::format_pushdata_string(cursor, v, extended)?,
Some(v @ OpCodes::OP_PUSHDATA4) => Script::format_pushdata_string(cursor, v, extended)?,
Some(v) => v.to_string(),
None => {
return Err(ScriptErrors::Serialise {
reason: byte.to_string(),
error: anyhow!("Unknown opcode"),
})
}
};
new_str.push_str(&opcode_str);
Script::read_opcodes(&self, cursor, new_str, extended)
}
fn get_pushdata_length(
cursor: &mut Cursor<Vec<u8>>,
opcode: OpCodes,
) -> Result<usize, ScriptErrors> {
let result = match opcode {
OpCodes::OP_PUSHDATA1 => cursor.read_u8().and_then(|x| Ok(x as usize)),
OpCodes::OP_PUSHDATA2 => cursor
.read_u16::<LittleEndian>()
.and_then(|x| Ok(x as usize)),
OpCodes::OP_PUSHDATA4 => cursor
.read_u32::<LittleEndian>()
.and_then(|x| Ok(x as usize)),
_ => {
return Err(ScriptErrors::Serialise {
reason: format!("Given opcode {} is not pushdata", opcode),
error: anyhow!(format!("Given opcode {} is not pushdata", opcode)),
})
}
};
result.or_else(|e| {
Err(ScriptErrors::Serialise {
reason: format!("Unable to read data length for opcode: {}", opcode),
error: anyhow!(e),
})
})
}
fn get_pushdata(cursor: &mut Cursor<Vec<u8>>, size: usize) -> Result<String, ScriptErrors> {
let mut data_buf = vec![0; size];
// read_exact, so a truncated script surfaces an error instead of zero-padded data
match cursor.read_exact(&mut data_buf) {
Err(e) => Err(ScriptErrors::Serialise {
reason: format!("Read {} OP_PUSHDATA bytes", size),
error: anyhow!(e),
}),
_ => Ok(hex::encode(data_buf)),
}
}
/**
* OpCodes such as OP_PUSH or the numerical OpCodes (OP_1-OP_16)
*/
fn get_special_opcode(
byte: u8,
extended: bool,
cursor: &mut Cursor<Vec<u8>>,
) -> Result<Option<String>, ScriptErrors> {
let code = match byte {
size @ 0x01..=0x4b => {
let pushdata = Script::get_pushdata(cursor, size as usize)?;
match extended {
true => Some(format!("OP_PUSH {} {}", size, pushdata)),
false => Some(pushdata),
}
},
v @ 82..=96 => match OpCodes::from_u8(v) {
Some(num_opcode) => Some(num_opcode.to_string()),
None => None
},
_ => None,
};
Ok(code)
}
fn format_pushdata_string(
cursor: &mut Cursor<Vec<u8>>,
v: OpCodes,
extended: bool,
) -> Result<String, ScriptErrors> {
let size = Script::get_pushdata_length(cursor, v)?;
let pushdata = Script::get_pushdata(cursor, size)?;
Ok(match extended {
true => format!("{} {} {}", v, size, pushdata),
false => pushdata,
})
}
}
/**
* Deserialise Methods
*/
impl Script {
pub(crate) fn from_hex_impl(hex: String) -> Result<Script, ScriptErrors> {
match hex::decode(hex) {
Ok(v) => Ok(Script::from_bytes(v)),
Err(e) => Err(ScriptErrors::Deserialise { error: anyhow!(e) }),
}
}
pub(crate) fn from_asm_string_impl(asm: String) -> Result<Script, ScriptErrors> {
let mut chunks = asm.split(" ").into_iter();
let mut buffer: Vec<u8> = Vec::new();
while let Some(code) = chunks.next() {
// Number OP_CODES
if let Ok(num_code) = u8::from_str(code) {
match num_code {
v @ 0 => buffer.push(v),
v @ 1..=16 => buffer.push(v + 80),
_ => ()
}
continue;
}
// Standard OP_CODES
if let Ok(opcode) = OpCodes::from_str(code) {
if let Some(opcode_byte) = opcode.to_u8() {
buffer.push(opcode_byte);
}
continue;
}
| match length {
op_push @ 0x01..=0x4b => {
buffer.push(op_push as u8);
match hex::decode(code) {
Ok(v) => buffer.append(&mut v.clone()),
Err(e) => return Err(ScriptErrors::Deserialise{ error: anyhow!(e) })
}
},
op_pushdata1_size @ 0x4c..=0xFF => {
match OpCodes::OP_PUSHDATA1.to_u8() {
Some(pushdata1_byte) => buffer.push(pushdata1_byte),
None => return Err(ScriptErrors::Deserialise{ error: anyhow!("Unable to deserialise OP_PUSHDATA1 Code to u8") })
};
buffer.push(op_pushdata1_size as u8);
match hex::decode(code) {
Ok(v) => buffer.append(&mut v.clone()),
Err(e) => return Err(ScriptErrors::Deserialise{ error: anyhow!(e) })
}
},
op_pushdata2_size @ 0x100..=0xFFFF => {
match OpCodes::OP_PUSHDATA2.to_u8() {
Some(pushdata2_byte) => buffer.push(pushdata2_byte),
None => return Err(ScriptErrors::Deserialise{ error: anyhow!("Unable to deserialise OP_PUSHDATA2 Code to u8") })
};
// 2-byte little-endian length; pushing a single byte would truncate it
buffer.extend_from_slice(&(op_pushdata2_size as u16).to_le_bytes());
match hex::decode(code) {
Ok(v) => buffer.append(&mut v.clone()),
Err(e) => return Err(ScriptErrors::Deserialise{ error: anyhow!(e) })
}
},
size => {
// Can't use a range pattern here because 0xFFFFFFFF is too large
if size >= 0x10000 && size <= 0xFFFFFFFF {
match OpCodes::OP_PUSHDATA4.to_u8() {
Some(pushdata4_byte) => buffer.push(pushdata4_byte),
None => return Err(ScriptErrors::Deserialise{ error: anyhow!("Unable to deserialise OP_PUSHDATA4 Code to u8") })
};
// 4-byte little-endian length
buffer.extend_from_slice(&(size as u32).to_le_bytes());
match hex::decode(code) {
Ok(v) => buffer.append(&mut v.clone()),
Err(e) => return Err(ScriptErrors::Deserialise{ error: anyhow!(e) })
}
}
}
}
}
Ok(Script(buffer))
}
}
/**
* Shared Functions
*/
#[wasm_bindgen]
impl Script {
#[wasm_bindgen(js_name = toBytes)]
pub fn to_bytes(&self) -> Vec<u8> {
self.0.clone()
}
#[wasm_bindgen(js_name = fromBytes)]
pub fn from_bytes(bytes: Vec<u8>) -> Script {
Script(bytes)
}
#[wasm_bindgen(js_name = toHex)]
pub fn to_hex(&self) -> String {
hex::encode(self.to_bytes())
}
}
/**
* Native Specific Functions
*/
#[cfg(not(target_arch = "wasm32"))]
impl Script {
pub fn to_asm_string(&self) -> Result<String, ScriptErrors> {
Script::to_asm_string_impl(&self, false)
}
pub fn to_extended_asm_string(&self) -> Result<String, ScriptErrors> {
Script::to_asm_string_impl(&self, true)
}
pub fn from_hex(hex: String) -> Result<Script, ScriptErrors> {
Script::from_hex_impl(hex)
}
pub fn from_asm_string(asm_string: String) -> Result<Script, ScriptErrors> {
Script::from_asm_string_impl(asm_string)
}
}
/**
* WASM Specific Functions
*/
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Script {
#[wasm_bindgen(js_name = toASMString)]
pub fn to_asm_string(&self) -> Result<String, JsValue> {
match Script::to_asm_string_impl(&self, false) {
Ok(v) => Ok(v),
Err(e) => throw_str(&e.to_string()),
}
}
#[wasm_bindgen(js_name = toExtendedASMString)]
pub fn to_extended_asm_string(&self) -> Result<String, JsValue> {
match Script::to_asm_string_impl(&self, true) {
Ok(v) => Ok(v),
Err(e) => throw_str(&e.to_string()),
}
}
#[wasm_bindgen(js_name = fromHex)]
pub fn from_hex(hex: String) -> Result<Script, JsValue> {
match Script::from_hex_impl(hex) {
Ok(v) => Ok(v),
Err(e) => throw_str(&e.to_string()),
}
}
#[wasm_bindgen(js_name = fromASMString)]
pub fn from_asm_string(asm_string: String) -> Result<Script, JsValue> {
match Script::from_asm_string_impl(asm_string) {
Ok(v) => Ok(v),
Err(e) => throw_str(&e.to_string()),
}
}
} | // PUSHDATA OP_CODES
let length = code.len() / 2; |
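The parser above maps a data length to a push opcode: lengths 1–75 (0x01–0x4b) are encoded directly as the opcode byte, while larger pushes use OP_PUSHDATA1/2/4 with a 1-, 2-, or 4-byte little-endian length. A short Python sketch of the same encoding rules (opcode byte values per the standard Bitcoin script conventions):

```python
OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4 = 0x4C, 0x4D, 0x4E

def encode_push(data: bytes) -> bytes:
    """Encode a script data push, choosing the opcode from the data length."""
    n = len(data)
    if 0x01 <= n <= 0x4B:           # the length byte itself is the opcode
        return bytes([n]) + data
    if n <= 0xFF:                   # 1-byte length
        return bytes([OP_PUSHDATA1, n]) + data
    if n <= 0xFFFF:                 # 2-byte little-endian length
        return bytes([OP_PUSHDATA2]) + n.to_bytes(2, "little") + data
    return bytes([OP_PUSHDATA4]) + n.to_bytes(4, "little") + data

# 76 bytes is the smallest push that needs OP_PUSHDATA1.
assert encode_push(b"\x00" * 76)[:2] == bytes([OP_PUSHDATA1, 76])
```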
main.py | """
Copyright (c) 2020 Autonomous Vision Group (AVG), Max Planck Institute for Intelligent Systems, Tuebingen, Germany
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import os
from tqdm import tqdm
import cv2
import torch
import itertools
from data import DataAdapterFactory
from experiment_state import ExperimentState
from experiment_settings import ExperimentSettings, recursive_dict_update
from optimization import optimize
from higo import higo_baseline
import general_settings
from evaluation import evaluate_state
from utils.logging import error
settings_dict_base = {
'data_settings': {
'data_type': "XIMEA",
'center_view': None,
'nr_neighbours': 40,
'base_input_path': "<input_data_base_folder>/",
'object_name': None,
'base_output_path': "<output_base_folder>/",
'gt_scan_folder': "<gt_scan_folder>/",
'calibration_path_geometric': "<calibration_base_folder>/geometric/",
'vignetting_file': '<calibration_base_folder>/photometric/vignetting.npz',
'depth_folder': 'tsdf-fusion-depth_oldCV_40_views',
'center_stride': 2,
'depth_scale': 1e-3,
'light_scale': 1e0,
'lazy_image_loading': False,
'input_reup_sample': 1.,
'input_down_sample': 1.,
'manual_center_view_crop': None,
},
'parametrization_settings': {
'locations': 'depth map',
'normals': 'per point',
'materials': 'base specular materials',
'brdf': 'cook torrance F1',
'observation_poses': 'quaternion',
'lights': 'point light',
},
'initialization_settings': {
'normals': 'from_closed_form',
'diffuse': 'from_closed_form',
'specular': 'hardcoded',
'lights': 'precalibrated',
'light_calibration_files': {
"positions": "<calibration_base_folder>/photometric/lights_array.pkl",
"intensities": "<calibration_base_folder>/photometric/LED_light_intensities.npy",
"attenuations": "<calibration_base_folder>/photometric/LED_angular_dependency.npy",
}
},
'default_optimization_settings': {
'parameters': [
'locations',
'poses',
'normals',
'vignetting',
'diffuse_materials',
'specular_weights',
'specular_materials',
'light_positions',
'light_intensities',
'light_attenuations',
],
'losses': {
"photoconsistency L1": 1e-4,
"geometric consistency": 1e1,
"depth compatibility": 1e10,
"normal smoothness": 1e-1,
"material sparsity": 1e-1,
"material smoothness": 1e0,
},
"iterations": 1000,
'visualize_initial': False,
'visualize_results': True,
'target_set': "training",
},
'optimization_steps': [],
}
settings_dict_higo = recursive_dict_update(
settings_dict_base,
{
'data_settings': {
'output_path_suffix': 'higo',
},
'higo_baseline': {
'step_size': 0.001, #1 mm
'step_radius': 25,
'eta': 10,
'lambda_n': 7.5,
'lambda_s': 3.0,
'lambda_1': 0.1,
'lambda_2': 0.5,
},
}
)
disjoint_iteration = [
{
'parameters': ['specular_weights'],
"iterations": 40,
},
{
'parameters': ['diffuse_materials', 'specular_materials'],
"iterations": 40,
},
{
'parameters': ['normals'],
"iterations": 60,
},
{
'parameters': ['locations', 'observation_poses'],
"iterations": 60,
},
]
settings_dict_disjoint = recursive_dict_update(
settings_dict_base,
{
'data_settings': {
'output_path_suffix': 'disjoint',
},
'default_optimization_settings': {
},
'optimization_steps': [
*(disjoint_iteration * 5),
# {
# 'iterations': 1000,
# 'parameters': [
# 'observation_poses',
# ],
# 'visualize_initial': True,
# 'target_set': "testing",
# },
]
}
)
settings_dict_proposed = recursive_dict_update(
settings_dict_base,
{
'data_settings': {
'output_path_suffix': 'proposed',
},
'optimization_steps': [
{
'parameters': [
'diffuse_materials',
'specular_weights',
'specular_materials',
'normals',
'observation_poses',
'locations',
],
'visualize_initial': True,
},
# {
# 'parameters': [
# 'observation_poses',
# ],
# 'visualize_initial': True,
# 'target_set': "testing",
# },
]
}
)
def | (settings_dict):
experiment_settings = ExperimentSettings(settings_dict)
# localize to the current computer as required
experiment_settings.localize()
# create an empty experiment object, with the correct parametrizations
experiment_settings.check_stored("parametrization_settings")
experiment_state = ExperimentState.create(experiment_settings.get('parametrization_settings'))
experiment_settings.save("parametrization_settings")
# create the data adapter
experiment_settings.check_stored("data_settings")
data_adapter = DataAdapterFactory(
experiment_settings.get('data_settings')['data_type']
)(
experiment_settings.get('local_data_settings')
)
if not experiment_settings.get('data_settings')['lazy_image_loading']:
device = torch.device(general_settings.device_name)
image_tensors = [
observation.get_image()
for observation in tqdm(data_adapter.images, desc="Preloading training images")
]
# now compact all observations into a few big tensors and remove the old tensors
# this makes for much faster access/operations
data_adapter.compound_image_tensors = {}
data_adapter.compound_image_tensor_sizes = {}
training_indices_batches, _ = data_adapter.get_training_info()
testing_indices_batches, _ = data_adapter.get_testing_info()
for batch in itertools.chain(training_indices_batches, testing_indices_batches):
compound_H = max([image_tensors[i].shape[-2] for i in batch])
compound_W = max([image_tensors[i].shape[-1] for i in batch])
C = len(batch)
compound_images = torch.zeros(C, 3, compound_H, compound_W, dtype=torch.float, device=device)
compound_sizes = torch.zeros(C, 2, dtype=torch.long, device=device)
for idx, image_idx in enumerate(batch):
src_tensor = image_tensors[image_idx]
compound_images[idx,:,:src_tensor.shape[-2], :src_tensor.shape[-1]] = src_tensor
compound_sizes[idx,0] = src_tensor.shape[-1]
compound_sizes[idx,1] = src_tensor.shape[-2]
del data_adapter.images[image_idx]._image
data_adapter.compound_image_tensors[batch] = compound_images
data_adapter.compound_image_tensor_sizes[batch] = compound_sizes
del image_tensors
experiment_settings.save("data_settings")
# initialize the parametrizations with the requested values, if the initialization is not available on disk
initialization_state_folder = experiment_settings.get_state_folder("initialization")
if experiment_settings.check_stored("initialization_settings"):
experiment_state.load(initialization_state_folder)
else:
experiment_state.initialize(data_adapter, experiment_settings.get('local_initialization_settings'))
experiment_state.save(initialization_state_folder)
experiment_settings.save("initialization_settings")
# evaluate_state("initialization",
# experiment_settings.get('data_settings')['object_name'],
# experiment_settings.get('local_data_settings')['gt_scan_folder'],
# experiment_state)
experiment_state.visualize_statics(
experiment_settings.get_output_path(),
data_adapter
)
if experiment_settings.get("higo_baseline") is not None:
higo_state_folder = experiment_settings.get_state_folder("higo")
if not experiment_settings.check_stored("higo_baseline"):
higo_experiment_state = higo_baseline(
experiment_state,
data_adapter,
higo_state_folder,
experiment_settings.get('higo_baseline')
)
higo_experiment_state.visualize(
experiment_settings.get_output_path(),
"higo_baseline",
data_adapter,
losses = [],
shadows_occlusions=False
)
higo_experiment_state.save(higo_state_folder)
experiment_settings.save("higo_baseline")
else:
higo_experiment_state = ExperimentState.copy(experiment_state)
higo_experiment_state.load(higo_state_folder)
evaluate_state("higo baseline",
experiment_settings.get('data_settings')['object_name'],
experiment_settings.get('local_data_settings')['gt_scan_folder'],
higo_experiment_state)
optimization_step_settings = experiment_settings.get('default_optimization_settings')
experiment_settings.check_stored("default_optimization_settings")
experiment_settings.save("default_optimization_settings")
for step_index in range(len(experiment_settings.get('optimization_steps'))):
step_state_folder = experiment_settings.get_state_folder("optimization_steps", step_index)
optimization_settings = experiment_settings.get("optimization_steps", step_index)
shorthand = experiment_settings.get_shorthand("optimization_steps", step_index)
set_name = "%02d_%s" % (step_index, shorthand)
if optimization_settings['visualize_initial']:
experiment_state.visualize(
experiment_settings.get_output_path(),
"%02d__initial" % step_index,
data_adapter,
optimization_settings['losses']
)
if experiment_settings.check_stored("optimization_steps", step_index):
experiment_state.load(step_state_folder)
else:
optimize(
experiment_state,
data_adapter,
optimization_settings,
output_path_structure=os.path.join(
experiment_settings.get_output_path(),
"evolution_%%s_%s.png" % set_name
)
)
experiment_state.save(step_state_folder)
experiment_settings.save("optimization_steps", step_index)
if optimization_settings['visualize_results']:
experiment_state.visualize(
experiment_settings.get_output_path(),
set_name,
data_adapter,
optimization_settings['losses']
)
evaluate_state(experiment_settings.get('data_settings').get('output_path_suffix', 'proposed'),
experiment_settings.get('data_settings')['object_name'],
experiment_settings.get('local_data_settings')['gt_scan_folder'],
experiment_state)
if __name__ == "__main__":
for object_name, center_view in [
# ("peter", 1171),
# ("peter", 566),
# ("teapot", 451),
# ("teapot", 999),
# ("gnome", 308),
# ("gnome", 488),
# ("girl", 882),
# ("girl", 1059),
("duck", 826),
# ("duck", 49),
# ("fire_hydrant", 582),
# ("fire_hydrant", 704),
# ("pineapple", 401),
# ("pineapple", 536),
# ("bunny", 670),
# ("bunny", 180),
]:
for settings_dict in [settings_dict_proposed, settings_dict_higo, settings_dict_disjoint]:
settings_dict['data_settings']['object_name'] = object_name
settings_dict['data_settings']['center_view'] = center_view
run_experiment(settings_dict)
| run_experiment |
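The three settings variants are produced by layering overrides onto `settings_dict_base` via `recursive_dict_update`, whose implementation lives in `experiment_settings` and isn't shown here. A plausible minimal sketch of its semantics, assuming it merges nested dicts recursively and returns a new dict:

```python
import copy

def recursive_dict_update(base, overrides):
    """Return a deep copy of `base` with `overrides` merged in, recursing into nested dicts."""
    result = copy.deepcopy(base)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(result.get(key), dict):
            result[key] = recursive_dict_update(result[key], value)
        else:
            result[key] = value
    return result

merged = recursive_dict_update({"a": {"x": 1, "y": 2}}, {"a": {"y": 3}})
assert merged == {"a": {"x": 1, "y": 3}}  # sibling keys survive the override
```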
regions-fn-subtyping.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #2263.
// pretty-expanded FIXME #23616
#![allow(dead_assignment)]
#![allow(unused_variables)]
// Should pass region checking.
fn ok(f: Box<FnMut(&usize)>) {
// Here, g is a function that can accept a usize pointer with
// lifetime r, and f is a function that can accept a usize pointer
// with any lifetime. The assignment g = f should be OK (i.e.,
// f's type should be a subtype of g's type), because f can be | let mut g: Box<for<'r> FnMut(&'r usize)> = Box::new(|x| { });
g = f;
}
// This version is the same as above, except that here, g's type is
// inferred.
fn ok_inferred(f: Box<FnMut(&usize)>) {
let mut g: Box<for<'r> FnMut(&'r usize)> = Box::new(|_| {});
g = f;
}
pub fn main() {
} | // used in any context that expects g's type. But this currently
// fails. |
signing.rs | use crate::{crypto::{keys::NamedRevision,
Blake2bHash,
PUBLIC_SIG_KEY_VERSION,
SECRET_SIG_KEY_VERSION},
error::{Error,
Result},
fs::Permissions,
origin::Origin};
use std::{io::Read,
path::Path};
/// Private module to re-export the various sodiumoxide concepts we
/// use, to keep them all consolidated and abstracted.
mod primitives {
pub use sodiumoxide::crypto::sign::{ed25519::{PublicKey,
SecretKey},
gen_keypair,
sign,
verify};
}
/// Given the name of an origin, generate a new signing key pair.
///
/// The resulting keys will need to be saved to a cache in order to
/// persist.
pub fn generate_signing_key_pair(origin: &Origin)
-> (PublicOriginSigningKey, SecretOriginSigningKey) {
let named_revision = NamedRevision::new(origin.to_string());
let (pk, sk) = primitives::gen_keypair();
let public = PublicOriginSigningKey { named_revision: named_revision.clone(),
key: pk, };
let secret = SecretOriginSigningKey { named_revision,
key: sk };
(public, secret)
}
////////////////////////////////////////////////////////////////////////
gen_key!(
/// Public key used to verify signatures of Biome artifacts signed with
/// a `SecretOriginSigningKey`.
PublicOriginSigningKey,
key_material: primitives::PublicKey,
file_format_version: PUBLIC_SIG_KEY_VERSION,
file_extension: "pub",
file_permissions: crate::fs::DEFAULT_PUBLIC_KEY_PERMISSIONS);
impl PublicOriginSigningKey {
/// Accept a signed, hex-encoded Blake2b hash, along with the
/// bytes for the content that was supposedly hashed-and-signed,
/// in order to verify the signature and hash.
///
/// Returns the verified, Blake2b hash of the contents.
pub fn verify(&self, signed_hash: &[u8], content: &mut dyn Read) -> Result<Blake2bHash> {
let expected_blake2b_hash = primitives::verify(signed_hash, &self.key)
.map_err(|_| Error::CryptoError("Verification failed".to_string()))
.map(String::from_utf8)? // get the hex-encoded hash
.map_err(|_| Error::CryptoError("Error parsing artifact hash".to_string()))?
.parse()?; // convert to Blake2bHash
let computed_blake2b_hash = Blake2bHash::from_reader(content)?;
if computed_blake2b_hash == expected_blake2b_hash {
Ok(expected_blake2b_hash)
} else {
let msg = format!("Biome artifact is invalid, hashes don't match (expected: {}, \
computed: {})",
expected_blake2b_hash, computed_blake2b_hash);
Err(Error::CryptoError(msg))
}
}
}
////////////////////////////////////////////////////////////////////////
gen_key!(
/// Key used to sign the content hashes of Biome artifacts. | SecretOriginSigningKey,
key_material: primitives::SecretKey,
file_format_version: SECRET_SIG_KEY_VERSION,
file_extension: "sig.key",
file_permissions: crate::fs::DEFAULT_SECRET_KEY_PERMISSIONS);
impl SecretOriginSigningKey {
/// Takes the contents of the given file and returns the signed,
/// hex-encoded Blake2b hash of the contents.
///
/// NOTE: The output is *not* a detached signature; the signed
/// content (the content hash) is recoverable from the output, as
/// intended.
pub fn sign<P>(&self, path: P) -> Result<Vec<u8>>
where P: AsRef<Path>
{
// Note that we're signing the *lower-case, hex-encoded
// string* of the Blake2b hash, NOT the hash itself! This will
// have implications if we ever want to change in the future
// :(
let hex_encoded_hash = Blake2bHash::from_file(&path)?;
Ok(self.sign_inner(hex_encoded_hash.to_string().as_bytes()))
}
/// Does the actual heavy lifting of signing a string of bytes.
///
/// Mainly separate to facilitate testing.
fn sign_inner<B>(&self, bytes: B) -> Vec<u8>
where B: AsRef<[u8]>
{
primitives::sign(bytes.as_ref(), &self.key)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::crypto::test_support::{fixture,
fixture_key};
use std::{fs::File,
io::BufReader};
/// The hash of the contents of the `tests/fixtures/signme.dat`
/// file, signed by
/// `tests/fixtures/keys/origin-key-valid-20160509190508.sig.key`.
const SIGNED_SIGNME_DAT_BLAKE2B_HASH: [u8; 128] =
[148u8, 34u8, 226u8, 235u8, 2u8, 136u8, 218u8, 135u8, 130u8, 241u8, 129u8, 134u8, 193u8,
206u8, 3u8, 15u8, 158u8, 99u8, 68u8, 169u8, 139u8, 38u8, 13u8, 140u8, 120u8, 92u8, 152u8,
143u8, 97u8, 135u8, 22u8, 233u8, 20u8, 243u8, 48u8, 63u8, 59u8, 82u8, 26u8, 51u8, 53u8,
63u8, 5u8, 214u8, 166u8, 231u8, 113u8, 123u8, 241u8, 33u8, 25u8, 227u8, 91u8, 201u8,
76u8, 48u8, 199u8, 214u8, 183u8, 110u8, 173u8, 161u8, 150u8, 12u8, 50u8, 48u8, 53u8,
57u8, 48u8, 97u8, 53u8, 50u8, 99u8, 52u8, 102u8, 48u8, 48u8, 53u8, 56u8, 56u8, 99u8,
53u8, 48u8, 48u8, 51u8, 50u8, 56u8, 98u8, 49u8, 54u8, 100u8, 52u8, 54u8, 54u8, 99u8,
57u8, 56u8, 50u8, 97u8, 50u8, 54u8, 102u8, 97u8, 98u8, 97u8, 97u8, 53u8, 102u8, 97u8,
52u8, 100u8, 99u8, 99u8, 56u8, 51u8, 48u8, 53u8, 50u8, 100u8, 100u8, 48u8, 97u8, 56u8,
52u8, 102u8, 50u8, 51u8, 51u8];
/// The hex-encoded Blake2b hash of the contents of
/// `tests/fixtures/signme.dat`.
const SIGNME_DAT_BLAKE2B_HASH: &str =
"20590a52c4f00588c500328b16d466c982a26fabaa5fa4dcc83052dd0a84f233";
#[test]
fn signing() {
let key: SecretOriginSigningKey =
fixture_key("keys/origin-key-valid-20160509190508.sig.key");
let file_to_sign = fixture("signme.dat");
let signed_message = key.sign(&file_to_sign).unwrap();
let expected = SIGNED_SIGNME_DAT_BLAKE2B_HASH.to_vec();
assert_eq!(signed_message.len(), expected.len());
for (i, (actual, expected)) in signed_message.iter().zip(expected.iter()).enumerate() {
assert_eq!(actual, expected,
"Signed messages differ at byte index {}; expected '{}' but got '{}'",
i, expected, actual);
}
}
#[test]
fn verification() {
let key: PublicOriginSigningKey = fixture_key("keys/origin-key-valid-20160509190508.pub");
let f = File::open(fixture("signme.dat")).unwrap();
let mut reader = BufReader::new(f);
let file_blake2b_hash = key.verify(&SIGNED_SIGNME_DAT_BLAKE2B_HASH, &mut reader)
.unwrap();
assert_eq!(file_blake2b_hash,
SIGNME_DAT_BLAKE2B_HASH.parse::<Blake2bHash>().unwrap());
}
#[test]
fn sign_and_verify_roundtrip() {
let sk: SecretOriginSigningKey =
fixture_key("keys/origin-key-valid-20160509190508.sig.key");
let pk: PublicOriginSigningKey = fixture_key("keys/origin-key-valid-20160509190508.pub");
let file_to_sign = fixture("signme.dat");
let signed_message = sk.sign(&file_to_sign).unwrap();
let f = File::open(&file_to_sign).unwrap();
let mut reader = BufReader::new(f);
let expected_hash = SIGNME_DAT_BLAKE2B_HASH.parse::<Blake2bHash>().unwrap();
let verified_hash = pk.verify(&signed_message, &mut reader).unwrap();
assert_eq!(verified_hash, expected_hash);
}
/// This is mainly to encapsulate knowledge about how Biome's
/// signing behaves. We historically have signed the lowercase
/// hex-encoded Blake2b hash digest of a file, rather than
/// signing the digest bytes directly. It is important that we
/// maintain that behavior for backwards compatibility reasons.
#[test]
fn signing_inputs_case_is_significant() {
let origin = "test-origin".parse().unwrap();
let (_public, secret) = generate_signing_key_pair(&origin);
let lower_case = "20590a52c4f00588c500328b16d466c982a26fabaa5fa4dcc83052dd0a84f233";
let upper_case = "20590A52C4F00588C500328B16D466C982A26FABAA5FA4DCC83052DD0A84F233";
// Both of these hex strings represent the same hash digest at
// the byte level
assert_eq!(lower_case.parse::<Blake2bHash>().unwrap(),
upper_case.parse::<Blake2bHash>().unwrap());
let lc_signed = secret.sign_inner(lower_case.as_bytes());
let uc_signed = secret.sign_inner(upper_case.as_bytes());
// But since we're signing the hex-encoding of the bytes, and
// not the bytes themselves, we will end up with different
// results, even though the actual digests are the same!
assert_ne!(lc_signed, uc_signed);
// This just confirms that repeated signing is equivalent in
// the first place (e.g., there isn't a nonce involved)
let lc_signed_2 = secret.sign_inner(lower_case.as_bytes());
assert_eq!(lc_signed, lc_signed_2);
}
} | |
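The `signing_inputs_case_is_significant` test pins down a subtle property: the signing input is the hex *string* of the Blake2b digest, not the digest bytes, so hex case changes what gets signed even though both spellings decode to the same digest. The same point in plain Python, without any signing library:

```python
lower = "20590a52c4f00588c500328b16d466c982a26fabaa5fa4dcc83052dd0a84f233"
upper = lower.upper()

# Both spellings decode to the same 32 digest bytes...
assert bytes.fromhex(lower) == bytes.fromhex(upper)

# ...but as strings they are different byte sequences, so a signature
# computed over the encoded string differs between the two cases.
assert lower.encode() != upper.encode()
```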
mesos_file.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from . import exceptions
from paasta_tools.async_utils import async_ttl_cache
class File:
chunk_size = 1024
def __init__(self, host, task=None, path=None):
self.host = host
self.task = task
self.path = path
if self.task is None:
self._host_path = self.path
else:
self._host_path = None # Defer until later (_fetch) so we don't make HTTP requests in __init__.
self._offset = 0
# Used during fetch, class level so the dict isn't constantly alloc'd
self._params = {
"path": self._host_path,
"offset": -1,
"length": self.chunk_size,
}
def __eq__(self, y):
return self.key() == y.key()
def __hash__(self):
return hash(self.__str__())
def __repr__(self):
|
def __str__(self):
return f"{self._where}:{self.path}"
def key(self):
return "{}:{}".format(self.host.key(), self._host_path)
@property
def _where(self):
return self.task["id"] if self.task is not None else self.host.key()
async def _fetch(self):
# fill in path if it wasn't set in __init__
if self._params["path"] is None:
self._params["path"] = os.path.join(await self.task.directory(), self.path)
resp = await self.host.fetch("/files/read.json", params=self._params)
if resp.status == 404:
raise exceptions.FileDoesNotExist("No such file or directory.")
return await resp.json()
async def exists(self):
try:
await self.size()
return True
except exceptions.FileDoesNotExist:
return False
except exceptions.SlaveDoesNotExist:
return False
# When reading a file, it is common to first check whether it exists, then
# look at the size to determine where to seek. Instead of requiring
# multiple requests to the slave, the size is cached for a very short
# period of time.
@async_ttl_cache(ttl=0.5, cleanup_self=True)
async def size(self):
return (await self._fetch())["offset"]
async def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self._offset = 0 + offset
elif whence == os.SEEK_CUR:
self._offset += offset
elif whence == os.SEEK_END:
self._offset = await self.size() + offset
def tell(self):
return self._offset
def _length(self, start, size):
if size and self.tell() - start + self.chunk_size > size:
return size - (self.tell() - start)
return self.chunk_size
async def _get_chunk(self, loc, size=None):
if size is None:
size = self.chunk_size
await self.seek(loc, os.SEEK_SET)
self._params["offset"] = loc
self._params["length"] = size
data = (await self._fetch())["data"]
await self.seek(len(data), os.SEEK_CUR)
return data
async def _read(self, size=None):
start = self.tell()
def pre(x):
return x == ""
def post(x):
return size and (self.tell() - start) >= size
blob = None
while not pre(blob) and not post(blob):
blob = await self._get_chunk(self.tell(), size=self._length(start, size))
yield blob
async def _read_reverse(self, size=None):
fsize = await self.size()
if not size:
size = fsize
def next_block():
current = fsize
while (current - self.chunk_size) > (fsize - size):
current -= self.chunk_size
yield current
for pos in next_block():
yield await self._get_chunk(pos)
yield await self._get_chunk(fsize - size, size % self.chunk_size)
async def _readlines(self, size=None):
last = ""
async for blob in self._read(size):
# This is not streaming and assumes small chunk sizes
blob_lines = (last + blob).split("\n")
for line in blob_lines[: len(blob_lines) - 1]:
yield line
last = blob_lines[-1]
async def _readlines_reverse(self, size=None):
buf = ""
async for blob in self._read_reverse(size):
blob_lines = (blob + buf).split("\n")
for line in reversed(blob_lines[1:]):
yield line
buf = blob_lines[0]
yield buf
| return f"<open file '{self.path}', for '{self._where}'>" |
observed_attestations.rs | //! Provides an `ObservedAttestations` struct which allows us to reject aggregated attestations if
//! we've already seen the aggregated attestation.
use parking_lot::RwLock;
use std::collections::HashSet;
use std::marker::PhantomData;
use tree_hash::TreeHash;
use types::{Attestation, EthSpec, Hash256, Slot};
/// As a DoS protection measure, the maximum number of distinct `Attestations` that will be
/// recorded for each slot.
///
/// Currently this is set to ~524k. If we say that each entry is 40 bytes (a 32-byte Hash256 plus an
/// 8-byte hash) then this comes to about 20 MB per slot. If we're storing 34 of these slots, then
/// we're at 680 MB. This is a lot of memory usage, but probably not a show-stopper for most
/// reasonable hardware.
///
/// Upstream conditions should strongly restrict the amount of attestations that can show up in
/// this pool. The maximum size with respect to upstream restrictions is more likely on the order
/// of the number of validators.
const MAX_OBSERVATIONS_PER_SLOT: usize = 1 << 19; // 524,288
#[derive(Debug, PartialEq)]
pub enum ObserveOutcome {
/// This attestation was already known.
AlreadyKnown,
/// This was the first time this attestation was observed.
New,
}
#[derive(Debug, PartialEq)]
pub enum Error {
SlotTooLow {
slot: Slot,
lowest_permissible_slot: Slot,
},
/// The function to obtain a set index failed, this is an internal error.
InvalidSetIndex(usize),
/// We have reached the maximum number of unique `Attestation` that can be observed in a slot.
/// This is a DoS protection function.
ReachedMaxObservationsPerSlot(usize),
IncorrectSlot {
expected: Slot,
attestation: Slot,
},
}
/// A `HashSet` that contains entries related to some `Slot`.
struct SlotHashSet {
set: HashSet<Hash256>,
slot: Slot,
}
impl SlotHashSet {
pub fn new(slot: Slot, initial_capacity: usize) -> Self {
Self {
slot,
set: HashSet::with_capacity(initial_capacity),
}
}
/// Store the attestation in self so future observations recognise its existence.
pub fn observe_attestation<E: EthSpec>(
&mut self,
a: &Attestation<E>,
root: Hash256,
) -> Result<ObserveOutcome, Error> {
if a.data.slot != self.slot {
return Err(Error::IncorrectSlot {
expected: self.slot,
attestation: a.data.slot,
});
}
if self.set.contains(&root) {
Ok(ObserveOutcome::AlreadyKnown)
} else {
// Here we check to see if this slot has reached the maximum observation count.
//
// The resulting behaviour is that we are no longer able to successfully observe new
// attestations, however we will continue to return `is_known` values. We could also
// disable `is_known`, however then we would stop forwarding attestations across the
// gossip network and I think that this is a worse case than sending some invalid ones.
// The underlying libp2p network is responsible for removing duplicate messages, so
// this doesn't risk a broadcast loop.
if self.set.len() >= MAX_OBSERVATIONS_PER_SLOT {
return Err(Error::ReachedMaxObservationsPerSlot(
MAX_OBSERVATIONS_PER_SLOT,
));
}
self.set.insert(root);
Ok(ObserveOutcome::New)
}
}
/// Indicates if `a` has been observed before.
pub fn is_known<E: EthSpec>(&self, a: &Attestation<E>, root: Hash256) -> Result<bool, Error> {
if a.data.slot != self.slot {
return Err(Error::IncorrectSlot {
expected: self.slot,
attestation: a.data.slot,
});
}
Ok(self.set.contains(&root))
}
/// The number of observed attestations in `self`.
pub fn len(&self) -> usize {
self.set.len()
}
}
/// Stores the roots of `Attestation` objects for some number of `Slots`, so we can determine if
/// these have previously been seen on the network.
pub struct ObservedAttestations<E: EthSpec> {
lowest_permissible_slot: RwLock<Slot>,
sets: RwLock<Vec<SlotHashSet>>,
_phantom: PhantomData<E>,
}
impl<E: EthSpec> Default for ObservedAttestations<E> {
fn default() -> Self {
Self {
lowest_permissible_slot: RwLock::new(Slot::new(0)),
sets: RwLock::new(vec![]),
_phantom: PhantomData,
}
}
}
impl<E: EthSpec> ObservedAttestations<E> {
/// Store the root of `a` in `self`.
///
/// `root` must equal `a.tree_hash_root()`.
pub fn observe_attestation(
&self,
a: &Attestation<E>,
root_opt: Option<Hash256>,
) -> Result<ObserveOutcome, Error> {
let index = self.get_set_index(a.data.slot)?;
let root = root_opt.unwrap_or_else(|| a.tree_hash_root());
self.sets
.write()
.get_mut(index)
.ok_or_else(|| Error::InvalidSetIndex(index))
.and_then(|set| set.observe_attestation(a, root))
}
/// Check to see if the `root` of `a` is in self.
///
/// `root` must equal `a.tree_hash_root()`.
pub fn is_known(&self, a: &Attestation<E>, root: Hash256) -> Result<bool, Error> {
let index = self.get_set_index(a.data.slot)?;
self.sets
.read()
.get(index)
.ok_or_else(|| Error::InvalidSetIndex(index))
.and_then(|set| set.is_known(a, root))
}
/// The maximum number of slots that attestations are stored for.
fn max_capacity(&self) -> u64 {
// We add `2` in order to account for one slot either side of the range due to
// `MAXIMUM_GOSSIP_CLOCK_DISPARITY`.
E::slots_per_epoch() + 2
}
/// Removes any attestations with a slot lower than `current_slot` and bars any future
/// attestations with a slot lower than `current_slot - SLOTS_RETAINED`.
pub fn prune(&self, current_slot: Slot) {
// Taking advantage of saturating subtraction on `Slot`.
let lowest_permissible_slot = current_slot - (self.max_capacity() - 1);
self.sets
.write()
.retain(|set| set.slot >= lowest_permissible_slot);
*self.lowest_permissible_slot.write() = lowest_permissible_slot;
}
/// Returns the index of `self.set` that matches `slot`.
///
/// If there is no existing set for this slot one will be created. If `self.sets.len() >=
/// Self::max_capacity()`, the set with the lowest slot will be replaced.
fn get_set_index(&self, slot: Slot) -> Result<usize, Error> {
let lowest_permissible_slot: Slot = *self.lowest_permissible_slot.read();
if slot < lowest_permissible_slot {
return Err(Error::SlotTooLow {
slot,
lowest_permissible_slot,
});
}
// Prune the pool if this attestation indicates that the current slot has advanced.
if lowest_permissible_slot + self.max_capacity() < slot + 1 {
self.prune(slot)
}
let mut sets = self.sets.write();
if let Some(index) = sets.iter().position(|set| set.slot == slot) {
return Ok(index);
}
// To avoid re-allocations, try and determine a rough initial capacity for the new set
// by obtaining the mean size of all items in earlier epoch.
let (count, sum) = sets
.iter()
// Only include slots that are less than the given slot in the average. This should
// generally avoid including recent slots that are still "filling up".
.filter(|set| set.slot < slot)
.map(|set| set.len())
.fold((0, 0), |(count, sum), len| (count + 1, sum + len));
// If we are unable to determine an average, just use 128 as it's the target committee
// size for the mainnet spec. This is perhaps a little wasteful for the minimal spec,
// but considering it's approx. 128 * 32 bytes we're not wasting much.
let initial_capacity = sum.checked_div(count).unwrap_or(128);
if sets.len() < self.max_capacity() as usize || sets.is_empty() {
let index = sets.len();
sets.push(SlotHashSet::new(slot, initial_capacity));
return Ok(index);
}
let index = sets
.iter()
.enumerate()
.min_by_key(|(_i, set)| set.slot)
.map(|(i, _set)| i)
.expect("sets cannot be empty due to previous .is_empty() check");
sets[index] = SlotHashSet::new(slot, initial_capacity);
Ok(index)
}
}
#[cfg(test)]
#[cfg(not(debug_assertions))]
mod tests {
use super::*;
use tree_hash::TreeHash;
use types::{test_utils::test_random_instance, Hash256};
type E = types::MainnetEthSpec;
const NUM_ELEMENTS: usize = 8;
fn get_attestation(slot: Slot, beacon_block_root: u64) -> Attestation<E> {
let mut a: Attestation<E> = test_random_instance();
a.data.slot = slot;
a.data.beacon_block_root = Hash256::from_low_u64_be(beacon_block_root);
a
}
fn single_slot_test(store: &ObservedAttestations<E>, slot: Slot) {
let attestations = (0..NUM_ELEMENTS as u64)
.map(|i| get_attestation(slot, i))
.collect::<Vec<_>>();
for a in &attestations {
assert_eq!(
store.is_known(a, a.tree_hash_root()),
Ok(false),
"should indicate an unknown attestation is unknown"
);
assert_eq!(
store.observe_attestation(a, None),
Ok(ObserveOutcome::New),
"should observe new attestation"
);
}
for a in &attestations {
assert_eq!(
store.is_known(a, a.tree_hash_root()),
Ok(true),
"should indicate a known attestation is known"
);
assert_eq!(
store.observe_attestation(a, Some(a.tree_hash_root())),
Ok(ObserveOutcome::AlreadyKnown),
"should acknowledge an existing attestation"
);
}
}
#[test]
fn single_slot() {
let store = ObservedAttestations::default();
single_slot_test(&store, Slot::new(0));
assert_eq!(
store.sets.read().len(),
1,
"should have a single set stored"
);
assert_eq!(
store.sets.read()[0].len(),
NUM_ELEMENTS,
"set should have NUM_ELEMENTS elements"
);
}
#[test]
fn multiple_contiguous_slots() {
let store = ObservedAttestations::default();
let max_cap = store.max_capacity();
for i in 0..max_cap * 3 {
let slot = Slot::new(i);
single_slot_test(&store, slot);
/*
* Ensure that the number of sets is correct.
*/
if i < max_cap {
assert_eq!(
store.sets.read().len(),
i as usize + 1,
"should have a {} sets stored",
i + 1
);
} else |
/*
* Ensure that each set contains the correct number of elements.
*/
for set in &store.sets.read()[..] {
assert_eq!(
set.len(),
NUM_ELEMENTS,
"each store should have NUM_ELEMENTS elements"
)
}
/*
* Ensure that all the sets have the expected slots
*/
let mut store_slots = store
.sets
.read()
.iter()
.map(|set| set.slot)
.collect::<Vec<_>>();
assert!(
store_slots.len() <= store.max_capacity() as usize,
"store size should not exceed max"
);
store_slots.sort_unstable();
let expected_slots = (i.saturating_sub(max_cap - 1)..=i)
.map(Slot::new)
.collect::<Vec<_>>();
assert_eq!(expected_slots, store_slots, "should have expected slots");
}
}
#[test]
fn multiple_non_contiguous_slots() {
let store = ObservedAttestations::default();
let max_cap = store.max_capacity();
let to_skip = vec![1_u64, 2, 3, 5, 6, 29, 30, 31, 32, 64];
let slots = (0..max_cap * 3)
.filter(|i| !to_skip.contains(i))
.collect::<Vec<_>>();
for &i in &slots {
let slot = Slot::from(i);
single_slot_test(&store, slot);
/*
* Ensure that each set contains the correct number of elements.
*/
for set in &store.sets.read()[..] {
assert_eq!(
set.len(),
NUM_ELEMENTS,
"each store should have NUM_ELEMENTS elements"
)
}
/*
* Ensure that all the sets have the expected slots
*/
let mut store_slots = store
.sets
.read()
.iter()
.map(|set| set.slot)
.collect::<Vec<_>>();
store_slots.sort_unstable();
assert!(
store_slots.len() <= store.max_capacity() as usize,
"store size should not exceed max"
);
let lowest = store.lowest_permissible_slot.read().as_u64();
let highest = slot.as_u64();
let expected_slots = (lowest..=highest)
.filter(|i| !to_skip.contains(i))
.map(Slot::new)
.collect::<Vec<_>>();
assert_eq!(
expected_slots,
&store_slots[..],
"should have expected slots"
);
}
}
}
| {
assert_eq!(
store.sets.read().len(),
max_cap as usize,
"should have max_capacity sets stored"
);
} |
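A quick check of the memory estimate from the module docs: 2^19 entries per slot at roughly 40 bytes each (a 32-byte `Hash256` plus ~8 bytes of `HashSet` overhead), retained for about 34 slots on mainnet:

```python
MAX_OBSERVATIONS_PER_SLOT = 1 << 19  # 524,288
BYTES_PER_ENTRY = 32 + 8             # Hash256 plus ~8 bytes of set overhead
SLOTS_RETAINED = 32 + 2              # mainnet slots_per_epoch + 2

per_slot = MAX_OBSERVATIONS_PER_SLOT * BYTES_PER_ENTRY
print(per_slot / 2**20)                   # 20.0 MiB per slot
print(per_slot * SLOTS_RETAINED / 2**20)  # 680.0 MiB worst case across 34 slots
```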
support.rs | use std::{
fmt::{Display, Formatter},
ops::Deref,
};
#[derive(Clone, PartialEq, Eq, Hash, Default, Debug)]
pub struct FullyQualifiedName(Box<[Box<str>]>);
/// Fully qualified name helper
impl FullyQualifiedName {
/// # Example
///
/// ```
/// use datapet::support::FullyQualifiedName;
///
/// let name = FullyQualifiedName::new("foo");
/// assert_eq!(name.to_string(), "foo");
/// ```
pub fn new<S>(item: S) -> Self
where
S: ToString,
{
Self::new_n(Some(&item))
}
/// # Example
///
/// ```
/// use datapet::support::FullyQualifiedName;
///
/// let name = FullyQualifiedName::new_n(&["foo", "bar"]);
/// assert_eq!(name.to_string(), "foo::bar");
/// ```
pub fn new_n<'s, I, S>(items: I) -> Self
where
I: IntoIterator<Item = &'s S>,
S: ToString + 's,
{
Self(
items
.into_iter()
.map(ToString::to_string)
.inspect(|item| {
if item.contains(':') {
panic!(": forbidden in name fragment");
}
})
.map(String::into_boxed_str)
.collect::<Vec<_>>()
.into_boxed_slice(),
)
}
/// # Example
///
/// ```
/// use datapet::support::FullyQualifiedName;
///
/// let name = FullyQualifiedName::new("foo");
/// let sub = name.sub("bar");
/// assert_eq!(sub.to_string(), "foo::bar");
/// ```
pub fn sub<S>(&self, item: S) -> Self
where
S: ToString,
{
self.sub_n(Some(&item))
}
/// # Example
///
/// ```
/// use datapet::support::FullyQualifiedName;
///
/// let name = FullyQualifiedName::new("foo");
/// let sub = name.sub_n(&["bar", "more"]);
/// assert_eq!(sub.to_string(), "foo::bar::more");
/// ```
pub fn | <'s, I, S>(&self, items: I) -> Self
where
I: IntoIterator<Item = &'s S>,
S: ToString + 's,
{
Self(
self.0
.iter()
.cloned()
.chain(
items
.into_iter()
.map(ToString::to_string)
.inspect(|item| {
if item.contains(':') {
panic!(": forbidden in name fragment");
}
})
.map(String::into_boxed_str),
)
.collect::<Vec<_>>()
.into_boxed_slice(),
)
}
pub fn to_name(&self) -> proc_macro2::Ident {
format_ident!("{}", **self.last().expect("last"))
}
}
impl Display for FullyQualifiedName {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
for (i, n) in self.0.iter().enumerate() {
if i > 0 {
f.write_str("::")?;
}
f.write_str(n)?;
}
Ok(())
}
}
impl Deref for FullyQualifiedName {
type Target = Box<[Box<str>]>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
pub fn fields_eq<'f, F>(fields: F) -> syn::Expr
where
F: IntoIterator<Item = &'f str>,
{
let eq = Some("|a, b| ".to_string())
.into_iter()
.chain(fields.into_iter().enumerate().map(|(i, field)| {
let and = if i > 0 { " && " } else { "" };
format!("{and}a.{field}().eq(b.{field}())", and = and, field = field)
}))
.collect::<String>();
syn::parse_str::<syn::Expr>(&eq).expect("eq")
}
pub fn fields_cmp<'f, F>(fields: F) -> syn::Expr
where
F: IntoIterator<Item = &'f str>,
{
let cmp = Some("|a, b| ".to_string())
.into_iter()
.chain(fields.into_iter().enumerate().map(|(i, field)| {
let (then, end_then) = if i > 0 {
(".then_with(|| ", ")")
} else {
("", "")
};
format!(
"{then}a.{field}().cmp(b.{field}(){end_then})",
then = then,
end_then = end_then,
field = field
)
}))
.collect::<String>();
syn::parse_str::<syn::Expr>(&cmp).expect("cmp")
}
| sub_n |
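What `fields_cmp` emits is easiest to see from the string it assembles before handing it to `syn::parse_str`; here is the same string-building replicated in Python, purely for illustration:

```python
def fields_cmp_source(fields):
    """Build the comparator-closure source string the way fields_cmp does."""
    parts = ["|a, b| "]
    for i, field in enumerate(fields):
        then, end_then = (".then_with(|| ", ")") if i > 0 else ("", "")
        parts.append(f"{then}a.{field}().cmp(b.{field}(){end_then})")
    return "".join(parts)

print(fields_cmp_source(["year", "title"]))
# |a, b| a.year().cmp(b.year()).then_with(|| a.title().cmp(b.title()))
```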
iterators2.rs | // iterators2.rs
// In this module, you'll learn some of the unique advantages that iterators can offer.
// Step 1. Complete the `capitalize_first` function to pass the first two cases.
// Step 2. Apply the `capitalize_first` function to a vector of strings.
// Ensure that it returns a vector of strings as well.
// Step 3. Apply the `capitalize_first` function again to a list.
// Try to ensure it returns a single string.
// As always, there are hints if you execute `rustlings hint iterators2`!
pub fn capitalize_first(input: &str) -> String {
let mut c = input.chars();
match c.next() {
None => String::new(),
Some(first) => first.to_uppercase().collect::<String>() + c.as_str(),
}
}
pub fn capitalize_words(words: Vec<&str>) -> Vec<String> |
#[cfg(test)]
mod tests {
use super::*;
// Step 1.
// Tests that verify your `capitalize_first` function implementation
#[test]
fn test_success() {
assert_eq!(capitalize_first("hello"), "Hello");
}
#[test]
fn test_empty() {
assert_eq!(capitalize_first(""), "");
}
// Step 2.
#[test]
fn test_iterate_string_vec() {
let words = vec!["hello", "world"];
let capitalized_words: Vec<String> = capitalize_words(words);
assert_eq!(capitalized_words, ["Hello", "World"]);
}
#[test]
fn test_iterate_into_string() {
let words = vec!["hello", " ", "world"];
let capitalized_words: String = capitalize_words(words).join("");
assert_eq!(capitalized_words, "Hello World");
}
}
| {
words
.iter()
.map(|w| capitalize_first(&w))
.collect::<Vec<String>>()
} |
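For comparison, the same first-letter capitalization in Python (slicing rather than `str.capitalize`, which would also lowercase the remaining characters):

```python
def capitalize_first(s: str) -> str:
    return s[:1].upper() + s[1:]  # slicing handles the empty string too

words = ["hello", " ", "world"]
assert [capitalize_first(w) for w in words] == ["Hello", " ", "World"]
assert "".join(map(capitalize_first, words)) == "Hello World"
```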
clickhouse_read_block.go | package clickhouse
import (
"github.com/kshvakov/clickhouse/lib/data"
)
func (ch *clickhouse) readBlock() (*data.Block, error) { |
if ch.compress {
}
var block data.Block
if err := block.Read(&ch.ServerInfo, ch.decoder); err != nil {
return nil, err
}
return &block, nil
} | if _, err := ch.decoder.String(); err != nil { // temporary table
return nil, err
} |
trail.py | from compas_cem.elements import Edge
class TrailEdge(Edge):
"""
A trail edge.
Notes
-----
If a plane is defined, it will override the absolute length of the trail edge.
However, the sign of the length (e.g. the combinatorial state) is preserved.
TODO: add explicit combinatorial state to the signature of the constructor. | # self.attributes = {"length": length, "state": state, "type": "trail", "plane": plane}
self.attributes = {"length": length, "type": "trail", "plane": plane}
def __repr__(self):
"""
"""
st = "{0}(length={1!r}, plane={2!r})"
return st.format(self.__class__.__name__, self.attributes["length"], self.attributes["plane"])
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass | """
def __init__(self, u, v, length, plane=None):
super(TrailEdge, self).__init__(u, v)
# TODO |
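A small instantiation showing the sign convention described in the notes. This assumes the package exports `TrailEdge` from `compas_cem.elements` and that a negative length encodes a compression trail (positive would be tension); the values are illustrative:

```python
from compas_cem.elements import TrailEdge  # assumes the class above is exported here

edge = TrailEdge(0, 1, length=-1.5)  # negative length: compression state
print(repr(edge))  # TrailEdge(length=-1.5, plane=None)
```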
monosize.go | package monosize
import "fmt"
// GetFixedSize reads files size as float64 for supporting huge file sizes
// (like ZettaByte and YottaByte) and returns user friendly file size in 6
// characters long with leading spaces (if required) using Base 2 calculation
// and file size abbreviation from Bytes to YottaBytes as string.
//
// Output will be always 6+1+2 = 9 characters long until YottaByte limit is exceeded.
// 6 characters for file size, 1 space character and 2 characters for abbreviations.
func | (fileSize float64) string {
abbreviations := []string{"B.", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
selectedAbr := 0
if fileSize < 0 {
return fmt.Sprintf("%6.2f %v", float64(0), abbreviations[0])
}
for i := range abbreviations {
if fileSize > 999 && selectedAbr < len(abbreviations)-1 {
fileSize /= 1024
selectedAbr++
} else {
selectedAbr = i
break
}
}
return fmt.Sprintf("%6.2f %v", fileSize, abbreviations[selectedAbr])
}
| GetFixedSize |
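The same fixed-width rule sketched in Python: divide by 1024 while the value exceeds 999, then emit six characters of number plus a space and a two-character unit, nine characters in total:

```python
def get_fixed_size(file_size: float) -> str:
    """Mirror of GetFixedSize: base-2 units, fixed 6+1+2 character output."""
    units = ["B.", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
    if file_size < 0:
        return f"{0.0:6.2f} {units[0]}"
    i = 0
    while file_size > 999 and i < len(units) - 1:
        file_size /= 1024
        i += 1
    return f"{file_size:6.2f} {units[i]}"

assert get_fixed_size(1536) == "  1.50 KB"
assert get_fixed_size(-5) == "  0.00 B."
```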
WizardForm3.js | /*
artifact generator: /wizzi/lib/artifacts/js/module/gen/main.js
primary source IttfDocument: c:\my\wizzi\v4\apps-wizzi\v4-solutions\formbuilder\ittf\webpacks\formbuilder\src\demos\reduxform\wizardform3.js.ittf
utc time: Mon, 10 Jul 2017 12:31:59 GMT
*/
'use strict';
import React from 'react';
import { Field, reduxForm } from 'redux-form';
import PropTypes from 'prop-types';
import renderField from './renderField';
import WizardFormValidate from './WizardFormValidate';
const colors = ['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet'];
const WizardForm3 = (props) => {
const {
handleSubmit,
pristine,
reset,
submitting,
previousPage
} = props;
return (
<form onSubmit={handleSubmit}>
<Field name="favoriteColor" id="favoriteColor" label="Favorite Color" type="select" component={renderField} options={colors} emptyOption={'Select a color...'}>
</Field>
<Field name="employed" label="Employed" component={renderField} type="checkbox">
</Field>
<Field name="notes" id="notes" label="Notes" value="bla bla" type="textarea" component={renderField}>
</Field>
<div>
<button type="button" className="previous" onClick={previousPage}>
</button>
<button type="submit" className="next">
Submit
</button>
</div>
</form>
)
;
}
WizardForm3.propTypes = {
handleSubmit: PropTypes.func, | previousPage: PropTypes.func
}
export default reduxForm({
form: 'wizard',
destroyOnUnmount: false,
forceUnregisterOnUnmount: true,
validate: WizardFormValidate
})(WizardForm3)
; | pristine: PropTypes.bool,
reset: PropTypes.func,
submitting: PropTypes.bool, |
demo_srim_compounddb_to_suzu.py | # coding: utf-8
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__),'../..')) |
import suzu.matdb.srim_compounddb as compounddb
air = compounddb.Compound()
air.desc = 'Air, Dry near sea level (ICRU-104) 0.00120484 O-23.2, N-75.5, Ar-1.3'
air.name = '%Air, Dry (ICRU-104)'
air.density = 0.00120484
air.mass_percentage = True
air.elems = [(6, 0.000124), (8, 0.231781), (7, 0.755267), (18, 0.012827)]
air.bonding = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
air.comment = """corrected by H. Paul, Sept. 2004
"""
air.fulltext = """*Air, Dry near sea level (ICRU-104) 0.00120484 O-23.2, N-75.5, Ar-1.3
"%Air, Dry (ICRU-104)", .00120484, 4, 6, .000124, 8, .231781, 7, .755267, 18, .012827
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
$ corrected by H. Paul, Sept. 2004
$"""
water = compounddb.Compound()
water.desc = 'Water (liquid) 1.00 H-2, O-1'
water.name = 'Water_Liquid (ICRU-276)'
water.density = 1.0
water.mass_percentage = False
water.elems = [(1, 2.0), (8, 1.0)]
water.bonding = [0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
water.comment = b"""Chemical Formula: H \u00c4\u00c4 O \u00c4\u00c4 H
There is about an 8% increase in the peak of the stopping power
for ions in water vapour relative to the liquid. (The peak of the
stopping occurs at an energy of about 100 keV/amu times the 2/3
power of the ion's atomic number.) Above the peak the phase
difference begins to disappear. This calculation is for the
LIQUID phase. """.decode('cp437')
print(water.to_suzu())
print(air.to_suzu()) | |
recreate.go | package recreate
import (
"fmt"
"os"
"time"
"github.com/golang/glog" | kclient "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util"
deployapi "github.com/openshift/origin/pkg/deploy/api"
strat "github.com/openshift/origin/pkg/deploy/strategy"
stratsupport "github.com/openshift/origin/pkg/deploy/strategy/support"
deployutil "github.com/openshift/origin/pkg/deploy/util"
)
// RecreateDeploymentStrategy is a simple strategy appropriate as a default.
// Its behavior is to scale down the last deployment to 0, and to scale up the
// new deployment to its desired replica count.
//
// A failure to disable any existing deployments will be considered a
// deployment failure.
type RecreateDeploymentStrategy struct {
// getReplicationController knows how to get a replication controller.
getReplicationController func(namespace, name string) (*kapi.ReplicationController, error)
// scaler is used to scale replication controllers.
scaler kubectl.Scaler
// codec is used to decode DeploymentConfigs contained in deployments.
codec runtime.Codec
// hookExecutor can execute a lifecycle hook.
hookExecutor hookExecutor
// retryTimeout is how long to wait for the replica count update to succeed
// before giving up.
retryTimeout time.Duration
// retryPeriod is how often to try updating the replica count.
retryPeriod time.Duration
}
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
scaler, _ := kubectl.ScalerFor("ReplicationController", client)
return &RecreateDeploymentStrategy{
getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
return client.ReplicationControllers(namespace).Get(name)
},
scaler: scaler,
codec: codec,
hookExecutor: stratsupport.NewHookExecutor(client, os.Stdout, codec),
retryTimeout: 120 * time.Second,
retryPeriod: 1 * time.Second,
}
}
// Deploy makes deployment active and disables oldDeployments.
func (s *RecreateDeploymentStrategy) Deploy(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int) error {
return s.DeployWithAcceptor(from, to, desiredReplicas, nil)
}
// DeployWithAcceptor scales down from and then scales up to. If
// updateAcceptor is provided and the desired replica count is >1, the first
// replica of to is rolled out and validated before performing the full scale
// up.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
if err != nil {
return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
}
params := config.Spec.Strategy.RecreateParams
retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
// Execute any pre-hook.
if params != nil && params.Pre != nil {
if err := s.hookExecutor.Execute(params.Pre, to, "prehook"); err != nil {
return fmt.Errorf("Pre hook failed: %s", err)
} else {
glog.Infof("Pre hook finished")
}
}
// Scale down the from deployment.
if from != nil {
glog.Infof("Scaling %s down to zero", deployutil.LabelForDeployment(from))
_, err := s.scaleAndWait(from, 0, retryParams, waitParams)
if err != nil {
return fmt.Errorf("couldn't scale %s to 0: %v", deployutil.LabelForDeployment(from), err)
}
}
// Scale up the to deployment.
if desiredReplicas > 0 {
// If an UpdateAcceptor is provided, scale up to 1 and validate the replica,
// aborting if the replica isn't acceptable.
if updateAcceptor != nil {
glog.Infof("Scaling %s to 1 before performing acceptance check", deployutil.LabelForDeployment(to))
updatedTo, err := s.scaleAndWait(to, 1, retryParams, waitParams)
if err != nil {
return fmt.Errorf("couldn't scale %s to 1: %v", deployutil.LabelForDeployment(to), err)
}
glog.Infof("Performing acceptance check of %s", deployutil.LabelForDeployment(to))
if err := updateAcceptor.Accept(updatedTo); err != nil {
return fmt.Errorf("update acceptor rejected %s: %v", deployutil.LabelForDeployment(to), err)
}
to = updatedTo
}
// Complete the scale up.
if to.Spec.Replicas != desiredReplicas {
glog.Infof("Scaling %s to %d", deployutil.LabelForDeployment(to), desiredReplicas)
updatedTo, err := s.scaleAndWait(to, desiredReplicas, retryParams, waitParams)
if err != nil {
return fmt.Errorf("couldn't scale %s to %d: %v", deployutil.LabelForDeployment(to), desiredReplicas, err)
}
to = updatedTo
}
}
// Execute any post-hook. Errors are logged and ignored.
if params != nil && params.Post != nil {
if err := s.hookExecutor.Execute(params.Post, to, "posthook"); err != nil {
util.HandleError(fmt.Errorf("post hook failed: %s", err))
} else {
glog.Infof("Post hook finished")
}
}
glog.Infof("Deployment %s successfully made active", to.Name)
return nil
}
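// scaleAndWait scales the deployment to the given replica count and re-reads
// it from the server. A ScalePrecondition with Size -1 and an empty resource
// version means "no precondition": the scale is applied regardless of the
// controller's current size.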
func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *kapi.ReplicationController, replicas int, retry *kubectl.RetryParams, wait *kubectl.RetryParams) (*kapi.ReplicationController, error) {
if err := s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, wait); err != nil {
return nil, err
}
updatedDeployment, err := s.getReplicationController(deployment.Namespace, deployment.Name)
if err != nil {
return nil, err
}
return updatedDeployment, nil
}
// hookExecutor knows how to execute a deployment lifecycle hook.
type hookExecutor interface {
Execute(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error
}
// hookExecutorImpl is a pluggable hookExecutor.
type hookExecutorImpl struct {
executeFunc func(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error
}
// Execute executes the provided lifecycle hook
func (i *hookExecutorImpl) Execute(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error {
return i.executeFunc(hook, deployment, label)
} |
kapi "k8s.io/kubernetes/pkg/api" |
regexremapdotconfig.go | package atscfg
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License. | */
import (
"strings"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc"
)
const ContentTypeRegexRemapDotConfig = ContentTypeTextASCII
type CDNDS struct {
OrgServerFQDN string
QStringIgnore int
CacheURL string
RegexRemap string
}
func DeliveryServicesToCDNDSes(dses []tc.DeliveryServiceNullable) map[tc.DeliveryServiceName]CDNDS {
sDSes := map[tc.DeliveryServiceName]CDNDS{}
for _, ds := range dses {
if ds.OrgServerFQDN == nil || ds.QStringIgnore == nil || ds.XMLID == nil {
if ds.XMLID == nil {
log.Errorln("atscfg.DeliveryServicesToCDNDSes got unknown DS with nil values! Skipping!")
} else {
log.Errorln("atscfg.DeliveryServicesToCDNDSes got DS '" + *ds.XMLID + "' with nil values! Skipping!")
}
continue
}
sds := CDNDS{OrgServerFQDN: *ds.OrgServerFQDN, QStringIgnore: *ds.QStringIgnore}
if ds.RegexRemap != nil {
sds.RegexRemap = *ds.RegexRemap
}
if ds.CacheURL != nil {
sds.CacheURL = *ds.CacheURL
}
sDSes[tc.DeliveryServiceName(*ds.XMLID)] = sds
}
return sDSes
}
func MakeRegexRemapDotConfig(
cdnName tc.CDNName,
toToolName string, // tm.toolname global parameter (TODO: cache itself?)
toURL string, // tm.url global parameter (TODO: cache itself?)
fileName string,
dses map[tc.DeliveryServiceName]CDNDS,
) string {
text := GenericHeaderComment(string(cdnName), toToolName, toURL)
// TODO verify prefix and suffix exist, and warn if they don't? Perl doesn't
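	// e.g. a fileName of "regex_remap_my-ds.config" yields the delivery
	// service name "my-ds"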
dsName := tc.DeliveryServiceName(strings.TrimSuffix(strings.TrimPrefix(fileName, "regex_remap_"), ".config"))
ds, ok := dses[dsName]
if !ok {
log.Errorln("MakeRegexRemapDotConfig: ds '" + dsName + "' not in dses, skipping!")
return text
}
text += ds.RegexRemap + "\n"
text = strings.Replace(text, `__RETURN__`, "\n", -1)
return text
} | |
entity_types.batch_delete_entity_types.js | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
// | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ** This file is automatically generated by gapic-generator-typescript. **
// ** https://github.com/googleapis/gapic-generator-typescript **
// ** All changes to this file may be overwritten. **
'use strict';
function main(parent, entityTypeNames) {
// [START dialogflow_v2_generated_EntityTypes_BatchDeleteEntityTypes_async]
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
/**
* Required. The name of the agent to delete all entities types for. Format:
* `projects/<Project ID>/agent`.
*/
// const parent = 'abc123'
/**
   * Required. The names of the entity types to delete. All names must point to the
* same agent as `parent`.
*/
// const entityTypeNames = 'abc123'
// Imports the Dialogflow library
const {EntityTypesClient} = require('@google-cloud/dialogflow').v2;
// Instantiates a client
const dialogflowClient = new EntityTypesClient();
async function callBatchDeleteEntityTypes() {
// Construct request
const request = {
parent,
entityTypeNames,
};
// Run request
const [operation] = await dialogflowClient.batchDeleteEntityTypes(request);
const [response] = await operation.promise();
console.log(response);
}
callBatchDeleteEntityTypes();
// [END dialogflow_v2_generated_EntityTypes_BatchDeleteEntityTypes_async]
}
process.on('unhandledRejection', err => {
console.error(err.message);
process.exitCode = 1;
});
main(...process.argv.slice(2)); | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, |
runDQMOffline_step1_L1TStage2CaloLayer2_cfg.py | import os
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
from Configuration.StandardSequences.Eras import eras
def get_root_files(path):
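    """Return file:// URLs for every .root file directly inside `path`."""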
files = os.listdir(path)
root_files = [f for f in files if f.endswith(".root")]
full_paths = [os.path.join(path, f) for f in root_files]
urls = ['file://{0}'.format(f) for f in full_paths]
return urls
options = VarParsing('analysis')
options.register(
'sample',
'TTJet',
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
)
options.setDefault('maxEvents', 2000)
options.setDefault(
'outputFile', 'L1TOffline_L1TStage2CaloLayer2_job1_RAW2DIGI_RECO_DQM.root')
options.parseArguments()
inputFiles = {
'TTJet': get_root_files('/data/TTJet/reco'),
'DoubleEG': get_root_files('/data/DoubleEG'),
}
inputFilesRAW = {
'TTJet': get_root_files('/data/TTJet/raw'),
}
process = cms.Process('L1TStage2EmulatorDQM', eras.Run2_2016)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load(
'Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# load DQM
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = int(options.maxEvents / 10)
process.maxEvents = cms.untracked.PSet(
input=cms.untracked.int32(options.maxEvents)
)
# Input source
process.source = cms.Source(
"PoolSource",
fileNames=cms.untracked.vstring(inputFiles[options.sample]),
)
if options.sample == 'TTJet':
process.source.secondaryFileNames = cms.untracked.vstring(inputFilesRAW[
'TTJet'])
process.options = cms.untracked.PSet(
)
# Output definition
process.DQMoutput = cms.OutputModule(
"DQMRootOutputModule",
fileName=cms.untracked.string(options.outputFile)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
if options.sample == 'TTJet':
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
else:
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.load('DQMOffline.L1Trigger.L1TEtSumJetOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TEGammaOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TTauOffline_cfi')
if os.environ.get('DEBUG', False):
process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.debugModules = cms.untracked.vstring(
'*',
)
# pfMETT1 from https://github.com/cms-sw/cmssw/blob/master/DQMOffline/JetMET/python/jetMETDQMOfflineSource_cff.py#L109,
# is difficult to set up, let's use pfMet for testing
process.l1tPFMetNoMuForDQM.pfMETCollection = 'pfMet'
process.dqmoffline_step = cms.Path(
process.goodPFJetsForL1T *
process.l1tPFMetNoMuForDQM *
process.l1tEtSumJetOfflineDQMEmu +
process.l1tEtSumJetOfflineDQM +
process.l1tEGammaOfflineDQM +
process.l1tEGammaOfflineDQMEmu +
process.l1tTauOfflineDQM + | process.l1tTauOfflineDQMEmu
)
if options.sample != 'TTJet':
process.dqmoffline_step.remove(process.l1tEtSumJetOfflineDQMEmu)
process.dqmoffline_step.remove(process.l1tEGammaOfflineDQMEmu)
process.dqmoffline_step.remove(process.l1tTauOfflineDQMEmu)
process.DQMoutput_step = cms.EndPath(process.DQMoutput)
# Schedule definition
process.schedule = cms.Schedule(
process.raw2digi_step,
)
# customisation of the process.
# Automatic addition of the customisation function from
# L1Trigger.Configuration.customiseReEmul
from L1Trigger.Configuration.customiseReEmul import L1TReEmulFromRAW
# call to customisation function L1TReEmulFromRAW imported from
# L1Trigger.Configuration.customiseReEmul
# complains about
# AttributeError: 'Process' object has no attribute 'simRctDigis'
# process = L1TReEmulFromRAW(process)
process.schedule.append(process.dqmoffline_step)
process.schedule.append(process.DQMoutput_step) | |
IDialogPropTypes.tsx | import { ReactNode, CSSProperties, SyntheticEvent } from 'react';
interface IDialogPropTypes {
className?: string;
keyboard?: boolean;
style?: CSSProperties;
mask?: boolean;
children?: any;
afterClose?: () => any;
onClose?: (e: SyntheticEvent<HTMLDivElement>) => any;
closable?: boolean;
maskClosable?: boolean;
visible?: boolean;
destroyOnClose ?: boolean;
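    // Click position recorded on open; presumably used as the transform origin
    // so the dialog animates out from where the user clicked.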
mousePosition?: { | };
title?: ReactNode;
footer?: ReactNode;
transitionName?: string;
maskTransitionName?: string;
animation?: any;
maskAnimation?: any;
wrapStyle?: {};
bodyStyle?: {};
maskStyle?: {};
prefixCls?: string;
wrapClassName?: string;
width?: number;
height?: number;
zIndex?: number;
bodyProps?: any;
maskProps?: any;
wrapProps?: any;
getContainer?: () => HTMLElement;
closeIcon?: ReactNode;
forceRender?: boolean;
}
export default IDialogPropTypes; | x: number,
y: number, |
TreeLSTM.py | # Copyright 2018-2021 Xiang Yu(x-yu17(at)mails.tsinghua.edu.cn)
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import torch
from torch.nn import init
import torchfold
import torch.nn as nn
from ImportantConfig import Config
config = Config()
class TreeLSTM(nn.Module):
def __init__(self, num_units):
super(TreeLSTM, self).__init__()
self.num_units = num_units
self.FC1 = nn.Linear(num_units, 5 * num_units)
self.FC2 = nn.Linear(num_units, 5 * num_units)
self.FC0 = nn.Linear(num_units, 5 * num_units)
self.LNh = nn.LayerNorm(num_units,elementwise_affine = False)
self.LNc = nn.LayerNorm(num_units,elementwise_affine = False)
def forward(self, left_in, right_in,inputX):
lstm_in = self.FC1(left_in[0])
lstm_in += self.FC2(right_in[0])
lstm_in += self.FC0(inputX)
a, i, f1, f2, o = lstm_in.chunk(5, 1)
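        # Standard binary tree-LSTM gating: a is the candidate cell update,
        # i the input gate, f1/f2 the forget gates for the left/right child
        # cell states, and o the output gate.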
c = (a.tanh() * i.sigmoid() + f1.sigmoid() * left_in[1] +
f2.sigmoid() * right_in[1])
h = o.sigmoid() * c.tanh()
h = self.LNh(h)
return h,c
class TreeRoot(nn.Module):
def __init__(self,num_units):
super(TreeRoot, self).__init__()
self.num_units = num_units
self.FC = nn.Linear(num_units, num_units)
if config.rootPool == 'meanPool':
self.sum_pooling = nn.AdaptiveAvgPool2d((1,num_units))
else:
self.sum_pooling = nn.AdaptiveMaxPool2d((1,num_units))
# self.sum_pooling = nn.AdaptiveMaxPool2d((1,num_units))
# self.max_pooling = nn.AdaptiveAvgPool2d((1,num_units))
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, tree_list):
return self.relu(self.FC(self.sum_pooling(tree_list)).view(-1,self.num_units))
class SPINN(nn.Module):
def __init__(self, n_classes, size, n_words, mask_size,device,max_column_in_table = 15):
super(SPINN, self).__init__()
self.size = size
self.tree_lstm = TreeLSTM(size)
self.tree_root = TreeRoot(size)
self.FC = nn.Linear(size*2, size)
self.table_embeddings = nn.Embedding(n_words, size)#2 * max_column_in_table * size)
self.column_embeddings = nn.Embedding(n_words, (1+2 * max_column_in_table) * size)
self.out = nn.Linear(size*2, size)
self.out2 = nn.Linear(size, n_classes)
self.outFc = nn.Linear(mask_size, size)
if config.rootPool == 'meanPool':
self.max_pooling = nn.AdaptiveAvgPool2d((1,size))
else: | self.max_pooling = nn.AdaptiveMaxPool2d((1,size))
self.relu = nn.ReLU()
self.sigmoid = nn.ReLU()
self.leafFC = nn.Linear(size, size)
self.sigmoid = nn.Sigmoid()
self.LN1 = nn.LayerNorm(size,)
self.LN2 = nn.LayerNorm(size,)
self.max_column_in_table = max_column_in_table
self.leafLn = nn.LayerNorm(size,elementwise_affine = False)
self.device = device
self.sigmoid = nn.Sigmoid()
def leaf(self, word_id, table_fea=None):
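        # Leaf embedding: table_fea weights the (2*max_column_in_table + 1)
        # column slots of this table; the selected column embeddings are
        # projected, pooled into a single table vector, and layer-normalized.
        # The zero tensor returned alongside is the leaf's initial cell state.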
# print('tlstm_wi',word_id)
all_columns = table_fea.view(-1,self.max_column_in_table*2+1,1)*self.column_embeddings(word_id).reshape(-1,2 * self.max_column_in_table+1,self.size)
all_columns = self.relu(self.leafFC(all_columns))
table_emb = self.max_pooling(all_columns.view(-1,self.max_column_in_table*2+1,self.size)).view(-1,self.size)
return self.leafLn(table_emb), torch.zeros(word_id.size()[0], self.size,device = self.device,dtype = torch.float32)
def inputX(self,left_emb,right_emb):
cat_emb = torch.cat([left_emb,right_emb],dim = 1)
return self.relu(self.FC(cat_emb))
def childrenNode(self, left_h, left_c, right_h, right_c,inputX):
return self.tree_lstm((left_h, left_c), (right_h, right_c),inputX)
def root(self,tree_list):
return self.tree_root(tree_list).view(-1,self.size)
def logits(self, encoding,join_matrix,prt=False):
encoding = self.root(encoding.view(1,-1,self.size))
# if prt:
# print(encoding)
matrix = self.relu(self.outFc(join_matrix))
# outencoding = torch.cat([encoding,encoding],dim = 1)
outencoding = torch.cat([encoding,matrix],dim = 1)
return self.out2(self.relu(self.out(outencoding))) | self.max_pooling = nn.AdaptiveMaxPool2d((1,size)) |
context.ts | import Debug from 'debug';
import { EventEmitter } from 'events';
import { ApplicationConfig } from '../model/application';
const debug = Debug('server:runtime-ctx');
type ErrorInfo = {
timestamp: Date;
message: string;
ex: any;
};
const MAX_RESERVE_ERRORS = 100;
export class | {
private envConf: ApplicationConfig;
private envErrors: ErrorInfo[];
private runContext: Object = new EventEmitter();
constructor(conf: ApplicationConfig) {
this.envConf = conf;
this.envErrors = [];
this.setPropertyToRunTime('_envConf', this.envConf);
}
public setPropertyToRunTime(key: string, obj: any) {
debug(`define runtime args: ${key}`);
Object.defineProperty(this.runContext, key, {
enumerable: true,
writable: true,
value: obj
});
}
public getRunTimeEnv() {
return this.runContext;
}
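    // Record a runtime error, retaining at most MAX_RESERVE_ERRORS entries;
    // once the buffer is full, the oldest entry is dropped first.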
public appendError(ex: any) {
const err = {
timestamp: new Date(),
message: `context(${this.envConf.name}) runtime error: ${ex.message}`,
ex
};
debug(`${err.message}, error stack: ${ex.stack}`);
        let total = this.envErrors.push(err);
if (total > MAX_RESERVE_ERRORS) {
this.envErrors.shift();
}
}
public getErrorInfo(total: number = 1) {
return this.envErrors.slice(-total);
}
} | RunTimeEnvironment |
test_mirror.py | # This test suite covers the functionality of mirror feature in SwSS
import distro
import pytest
import time
from swsscommon import swsscommon
from distutils.version import StrictVersion
class TestMirror(object):
def setup_db(self, dvs):
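        # Redis DB ids follow the standard SONiC layout:
        # 0 = APPL_DB, 1 = ASIC_DB, 4 = CONFIG_DB, 6 = STATE_DB.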
self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0)
self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0)
self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0)
self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0)
def set_interface_status(self, dvs, interface, admin_status):
if interface.startswith("PortChannel"):
tbl_name = "PORTCHANNEL"
elif interface.startswith("Vlan"):
tbl_name = "VLAN"
else:
tbl_name = "PORT"
tbl = swsscommon.Table(self.cdb, tbl_name)
fvs = swsscommon.FieldValuePairs([("admin_status", "up")])
tbl.set(interface, fvs)
time.sleep(1)
        # When using FRR, a route cannot be inserted if the neighbor is not
        # connected, so it is mandatory to force the interface up manually.
if interface.startswith("PortChannel"):
dvs.runcmd("bash -c 'echo " + ("1" if admin_status == "up" else "0") +\
" > /sys/class/net/" + interface + "/carrier'")
def add_ip_address(self, interface, ip):
if interface.startswith("PortChannel"):
tbl_name = "PORTCHANNEL_INTERFACE"
elif interface.startswith("Vlan"):
tbl_name = "VLAN_INTERFACE"
else:
tbl_name = "INTERFACE"
tbl = swsscommon.Table(self.cdb, tbl_name)
fvs = swsscommon.FieldValuePairs([("NULL", "NULL")])
tbl.set(interface + "|" + ip, fvs)
tbl.set(interface, fvs)
time.sleep(1)
def remove_ip_address(self, interface, ip):
if interface.startswith("PortChannel"):
tbl_name = "PORTCHANNEL_INTERFACE"
elif interface.startswith("Vlan"):
tbl_name = "VLAN_INTERFACE"
else:
tbl_name = "INTERFACE"
tbl = swsscommon.Table(self.cdb, tbl_name)
tbl._del(interface + "|" + ip)
tbl._del(interface)
time.sleep(1)
def add_neighbor(self, interface, ip, mac):
tbl = swsscommon.ProducerStateTable(self.pdb, "NEIGH_TABLE")
fvs = swsscommon.FieldValuePairs([("neigh", mac),
("family", "IPv4")])
tbl.set(interface + ":" + ip, fvs)
time.sleep(1)
def remove_neighbor(self, interface, ip):
tbl = swsscommon.ProducerStateTable(self.pdb, "NEIGH_TABLE")
tbl._del(interface + ":" + ip)
time.sleep(1)
def add_route(self, dvs, prefix, nexthop):
dvs.runcmd("ip route add " + prefix + " via " + nexthop)
time.sleep(1)
def remove_route(self, dvs, prefix):
dvs.runcmd("ip route del " + prefix)
time.sleep(1)
def create_mirror_session(self, name, src, dst, gre, dscp, ttl, queue):
tbl = swsscommon.Table(self.cdb, "MIRROR_SESSION")
fvs = swsscommon.FieldValuePairs([("src_ip", src),
("dst_ip", dst),
("gre_type", gre),
("dscp", dscp),
("ttl", ttl),
("queue", queue)])
tbl.set(name, fvs)
time.sleep(1)
def remove_mirror_session(self, name):
tbl = swsscommon.Table(self.cdb, "MIRROR_SESSION")
tbl._del(name)
time.sleep(1)
def get_mirror_session_status(self, name):
return self.get_mirror_session_state(name)["status"]
def get_mirror_session_state(self, name):
tbl = swsscommon.Table(self.sdb, "MIRROR_SESSION_TABLE")
(status, fvs) = tbl.get(name)
assert status == True
assert len(fvs) > 0
return { fv[0]: fv[1] for fv in fvs }
def check_syslog(self, dvs, marker, log, expected_cnt):
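        # Count occurrences of `log` in syslog after the `marker` line:
        # awk prints from the marker to end of file, grep filters, wc counts.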
(ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" % (marker, log)])
assert out.strip() == str(expected_cnt)
def test_MirrorAddRemove(self, dvs, testlog):
"""
This test covers the basic mirror session creation and removal operations
Operation flow:
1. Create mirror session
The session remains inactive because no nexthop/neighbor exists
2. Bring up port; assign IP; create neighbor; create route
The session remains inactive until the route is created
3. Remove route; remove neighbor; remove IP; bring down port
        The session becomes inactive again and remains so to the end
        4. Remove mirror session
"""
self.setup_db(dvs)
session = "TEST_SESSION"
marker = dvs.add_log_marker()
# create mirror session
self.create_mirror_session(session, "1.1.1.1", "2.2.2.2", "0x6558", "8", "100", "0")
assert self.get_mirror_session_state(session)["status"] == "inactive"
self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP 2.2.2.2", 1)
# bring up Ethernet16
self.set_interface_status(dvs, "Ethernet16", "up")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# add IP address to Ethernet16
self.add_ip_address("Ethernet16", "10.0.0.0/31")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# add neighbor to Ethernet16
self.add_neighbor("Ethernet16", "10.0.0.1", "02:04:06:08:10:12")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# add route to mirror destination via 10.0.0.1
self.add_route(dvs, "2.2.2.2", "10.0.0.1")
assert self.get_mirror_session_state(session)["status"] == "active"
assert self.get_mirror_session_state(session)["monitor_port"] == "Ethernet16"
assert self.get_mirror_session_state(session)["dst_mac"] == "02:04:06:08:10:12"
assert self.get_mirror_session_state(session)["route_prefix"] == "2.2.2.2/32"
# check asic database
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
mirror_entries = tbl.getKeys()
assert len(mirror_entries) == 1
(status, fvs) = tbl.get(mirror_entries[0])
assert status == True
assert len(fvs) == 11
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet16"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TYPE":
assert fv[1] == "SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE":
assert fv[1] == "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION":
assert fv[1] == "4"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TOS":
assert fv[1] == "32"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TTL":
assert fv[1] == "100"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS":
assert fv[1] == "1.1.1.1"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS":
assert fv[1] == "2.2.2.2"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS":
assert fv[1] == dvs.runcmd("bash -c \"ip link show eth0 | grep ether | awk '{print $2}'\"")[1].strip().upper()
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS":
assert fv[1] == "02:04:06:08:10:12"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE":
assert fv[1] == "25944" # 0x6558
else:
assert False
# remove route
self.remove_route(dvs, "2.2.2.2")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# remove neighbor
self.remove_neighbor("Ethernet16", "10.0.0.1")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# remove IP address
self.remove_ip_address("Ethernet16", "10.0.0.0/31")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# bring down Ethernet16
self.set_interface_status(dvs, "Ethernet16", "down")
assert self.get_mirror_session_state(session)["status"] == "inactive"
marker = dvs.add_log_marker()
# remove mirror session
self.remove_mirror_session(session)
self.check_syslog(dvs, marker, "Detached next hop observer for destination IP 2.2.2.2", 1)
def create_vlan(self, dvs, vlan):
#dvs.runcmd("ip link del Bridge")
#dvs.runcmd("ip link add Bridge up type bridge")
tbl = swsscommon.Table(self.cdb, "VLAN")
fvs = swsscommon.FieldValuePairs([("vlanid", vlan)])
tbl.set("Vlan" + vlan, fvs)
time.sleep(1)
def remove_vlan(self, vlan):
tbl = swsscommon.Table(self.cdb, "VLAN")
tbl._del("Vlan" + vlan)
time.sleep(1)
def create_vlan_member(self, vlan, interface):
tbl = swsscommon.Table(self.cdb, "VLAN_MEMBER")
fvs = swsscommon.FieldValuePairs([("tagging_mode", "untagged")])
tbl.set("Vlan" + vlan + "|" + interface, fvs)
time.sleep(1)
def remove_vlan_member(self, vlan, interface):
tbl = swsscommon.Table(self.cdb, "VLAN_MEMBER")
tbl._del("Vlan" + vlan + "|" + interface)
time.sleep(1)
def create_fdb(self, vlan, mac, interface):
tbl = swsscommon.ProducerStateTable(self.pdb, "FDB_TABLE")
fvs = swsscommon.FieldValuePairs([("port", interface),
("type", "dynamic")])
tbl.set("Vlan" + vlan + ":" + mac, fvs)
time.sleep(1)
def remove_fdb(self, vlan, mac):
tbl = swsscommon.ProducerStateTable(self.pdb, "FDB_TABLE")
tbl._del("Vlan" + vlan + ":" + mac)
time.sleep(1)
# Ignore testcase in Debian Jessie
# TODO: Remove this skip if Jessie support is no longer needed
@pytest.mark.skipif(StrictVersion(distro.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support")
def test_MirrorToVlanAddRemove(self, dvs, testlog):
"""
        This test covers basic mirror session creation and removal operations
        with the destination port sitting in a VLAN
        Operation flow:
1. Create mirror session
2. Create VLAN; assign IP; create neighbor; create FDB
The session should be up only at this time.
3. Remove FDB; remove neighbor; remove IP; remove VLAN
4. Remove mirror session
"""
self.setup_db(dvs)
session = "TEST_SESSION"
marker = dvs.add_log_marker()
# create mirror session
self.create_mirror_session(session, "5.5.5.5", "6.6.6.6", "0x6558", "8", "100", "0")
assert self.get_mirror_session_state(session)["status"] == "inactive"
self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP 6.6.6.6", 1)
# create vlan; create vlan member
self.create_vlan(dvs, "6")
self.create_vlan_member("6", "Ethernet4")
# bring up vlan and member
self.set_interface_status(dvs, "Vlan6", "up")
self.set_interface_status(dvs, "Ethernet4", "up")
# add ip address to vlan 6
self.add_ip_address("Vlan6", "6.6.6.0/24")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# create neighbor to vlan 6
self.add_neighbor("Vlan6", "6.6.6.6", "66:66:66:66:66:66")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# create fdb entry to ethernet4
self.create_fdb("6", "66-66-66-66-66-66", "Ethernet4")
assert self.get_mirror_session_state(session)["status"] == "active"
# check asic database
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
mirror_entries = tbl.getKeys()
assert len(mirror_entries) == 1
(status, fvs) = tbl.get(mirror_entries[0])
assert status == True
assert len(fvs) == 16
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet4"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TYPE":
assert fv[1] == "SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE":
assert fv[1] == "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION":
assert fv[1] == "4"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TOS":
assert fv[1] == "32"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TTL":
assert fv[1] == "100"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS":
assert fv[1] == "5.5.5.5"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS":
assert fv[1] == "6.6.6.6"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS":
assert fv[1] == dvs.runcmd("bash -c \"ip link show eth0 | grep ether | awk '{print $2}'\"")[1].strip().upper()
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS":
assert fv[1] == "66:66:66:66:66:66"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE":
assert fv[1] == "25944" # 0x6558
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID":
assert fv[1] == "true"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_TPID":
assert fv[1] == "33024"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_ID":
assert fv[1] == "6"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_PRI":
assert fv[1] == "0"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_CFI":
assert fv[1] == "0"
else:
assert False
# remove fdb entry
self.remove_fdb("6", "66-66-66-66-66-66")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# remove neighbor
self.remove_neighbor("Vlan6", "6.6.6.6")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# remove ip address
self.remove_ip_address("Vlan6", "6.6.6.0/24")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# bring down vlan and member
self.set_interface_status(dvs, "Ethernet4", "down")
self.set_interface_status(dvs, "Vlan6", "down")
# remove vlan member; remove vlan
self.remove_vlan_member("6", "Ethernet4")
self.remove_vlan("6")
marker = dvs.add_log_marker()
# remove mirror session
self.remove_mirror_session(session)
self.check_syslog(dvs, marker, "Detached next hop observer for destination IP 6.6.6.6", 1)
def create_port_channel(self, dvs, channel):
tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_TABLE")
fvs = swsscommon.FieldValuePairs([("admin", "up"), ("mtu", "9100")])
tbl.set("PortChannel" + channel, fvs)
dvs.runcmd("ip link add PortChannel" + channel + " type bond")
tbl = swsscommon.Table(self.sdb, "LAG_TABLE")
fvs = swsscommon.FieldValuePairs([("state", "ok")])
tbl.set("PortChannel" + channel, fvs)
time.sleep(1)
def remove_port_channel(self, dvs, channel):
tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_TABLE")
tbl._del("PortChannel" + channel)
dvs.runcmd("ip link del PortChannel" + channel)
tbl = swsscommon.Table(self.sdb, "LAG_TABLE")
tbl._del("PortChannel" + channel)
time.sleep(1)
def create_port_channel_member(self, channel, interface):
tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_MEMBER_TABLE")
fvs = swsscommon.FieldValuePairs([("status", "enabled")])
tbl.set("PortChannel" + channel + ":" + interface, fvs)
time.sleep(1)
def remove_port_channel_member(self, channel, interface):
tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_MEMBER_TABLE")
tbl._del("PortChannel" + channel + ":" + interface)
time.sleep(1)
def test_MirrorToLagAddRemove(self, dvs, testlog):
"""
This test covers basic mirror session creation and removal operations
        with the destination port sitting in a LAG
        Operation flow:
        1. Create mirror session
        2. Create LAG; assign IP; create directly connected neighbor
        The session should be up only at this time.
3. Remove neighbor; remove IP; remove LAG
4. Remove mirror session
"""
self.setup_db(dvs)
session = "TEST_SESSION"
marker = dvs.add_log_marker()
# create mirror session
self.create_mirror_session(session, "10.10.10.10", "11.11.11.11", "0x6558", "8", "100", "0")
assert self.get_mirror_session_state(session)["status"] == "inactive"
self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP 11.11.11.11", 1)
# create port channel; create port channel member
self.create_port_channel(dvs, "008")
self.create_port_channel_member("008", "Ethernet88")
# bring up port channel and port channel member
self.set_interface_status(dvs, "PortChannel008", "up")
self.set_interface_status(dvs, "Ethernet88", "up")
# add ip address to port channel 008
self.add_ip_address("PortChannel008", "11.11.11.0/24")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# create neighbor to port channel 008
self.add_neighbor("PortChannel008", "11.11.11.11", "88:88:88:88:88:88")
assert self.get_mirror_session_state(session)["status"] == "active"
# check asic database
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
(status, fvs) = tbl.get(tbl.getKeys()[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet88"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS":
assert fv[1] == "88:88:88:88:88:88"
# remove neighbor
self.remove_neighbor("PortChannel008", "11.11.11.11")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# remove ip address
self.remove_ip_address("PortChannel008", "11.11.11.0/24")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# bring down port channel and port channel member
self.set_interface_status(dvs, "PortChannel008", "down")
self.set_interface_status(dvs, "Ethernet88", "down")
# remove port channel member; remove port channel
self.remove_port_channel_member("008", "Ethernet88")
self.remove_port_channel(dvs, "008")
marker = dvs.add_log_marker()
# remove mirror session
self.remove_mirror_session(session)
self.check_syslog(dvs, marker, "Detached next hop observer for destination IP 11.11.11.11", 1)
# Ignore testcase in Debian Jessie
# TODO: Remove this skip if Jessie support is no longer needed
@pytest.mark.skipif(StrictVersion(distro.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support")
def test_MirrorDestMoveVlan(self, dvs, testlog):
"""
        This test moves the mirror session destination from a non-VLAN port to a
        VLAN member port and back to a non-VLAN port
1. Create mirror session
2. Enable non-VLAN monitor port
3. Create VLAN; move to VLAN without FDB entry
4. Create FDB entry
5. Remove FDB entry
6. Remove VLAN; move to non-VLAN
7. Disable non-VLAN monitor port
8. Remove mirror session
"""
self.setup_db(dvs)
session = "TEST_SESSION"
# create mirror session
self.create_mirror_session(session, "7.7.7.7", "8.8.8.8", "0x6558", "8", "100", "0")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# bring up port; add ip; add neighbor; add route
self.set_interface_status(dvs, "Ethernet32", "up")
self.add_ip_address("Ethernet32", "80.0.0.0/31")
self.add_neighbor("Ethernet32", "80.0.0.1", "02:04:06:08:10:12")
self.add_route(dvs, "8.8.0.0/16", "80.0.0.1")
assert self.get_mirror_session_state(session)["status"] == "active"
# check monitor port
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
(status, fvs) = tbl.get(tbl.getKeys()[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet32"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID":
assert fv[1] == "false"
# mirror session move round 1
# create vlan; create vlan member; bring up vlan and member
self.create_vlan(dvs, "9")
self.create_vlan_member("9", "Ethernet48")
self.set_interface_status(dvs, "Vlan9", "up")
self.set_interface_status(dvs, "Ethernet48", "up")
assert self.get_mirror_session_state(session)["status"] == "active"
# add ip address to vlan 9
self.add_ip_address("Vlan9", "8.8.8.0/24")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# create neighbor to vlan 9
self.add_neighbor("Vlan9", "8.8.8.8", "88:88:88:88:88:88")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# create fdb entry to ethernet48
self.create_fdb("9", "88-88-88-88-88-88", "Ethernet48")
assert self.get_mirror_session_state(session)["status"] == "active"
# check monitor port
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
(status, fvs) = tbl.get(tbl.getKeys()[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet48"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID":
assert fv[1] == "true"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_TPID":
assert fv[1] == "33024"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_ID":
assert fv[1] == "9"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_PRI":
assert fv[1] == "0"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_CFI":
assert fv[1] == "0"
# mirror session move round 2
# remove fdb entry
self.remove_fdb("9", "88-88-88-88-88-88")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# remove neighbor
self.remove_neighbor("Vlan9", "8.8.8.8")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# remove ip address
self.remove_ip_address("Vlan9", "8.8.8.0/24")
assert self.get_mirror_session_state(session)["status"] == "active"
# check monitor port
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
(status, fvs) = tbl.get(tbl.getKeys()[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet32"
elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID":
assert fv[1] == "false"
# bring down vlan and member; remove vlan member; remove vlan
self.set_interface_status(dvs, "Ethernet48", "down")
self.set_interface_status(dvs, "Vlan9", "down")
self.remove_vlan_member("9", "Ethernet48")
self.remove_vlan("9")
# remove route; remove neighbor; remove ip; bring down port
self.remove_route(dvs, "8.8.8.0/24")
self.remove_neighbor("Ethernet32", "80.0.0.1")
self.remove_ip_address("Ethernet32", "80.0.0.0/31")
self.set_interface_status(dvs, "Ethernet32", "down")
# remove mirror session
self.remove_mirror_session(session) | This test tests mirror session destination move from non-LAG to LAG
and back to non-LAG port
1. Create mirror session
2. Enable non-LAG monitor port
3. Create LAG; move to LAG with one member
4. Remove LAG member
5. Create LAG member
6. Remove LAG; move to non-LAG
7. Disable non-LAG monitor port
8. Remove mirror session
"""
self.setup_db(dvs)
session = "TEST_SESSION"
# create mirror session
self.create_mirror_session(session, "12.12.12.12", "13.13.13.13", "0x6558", "8", "100", "0")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# bring up port; add ip; add neighbor; add route
self.set_interface_status(dvs, "Ethernet64", "up")
self.add_ip_address("Ethernet64", "100.0.0.0/31")
self.add_neighbor("Ethernet64", "100.0.0.1", "02:04:06:08:10:12")
self.add_route(dvs, "13.13.0.0/16", "100.0.0.1")
assert self.get_mirror_session_state(session)["status"] == "active"
# check monitor port
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
(status, fvs) = tbl.get(tbl.getKeys()[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet64"
if fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS":
assert fv[1] == "02:04:06:08:10:12"
# mirror session move round 1
# create port channel; create port channel member; bring up
self.create_port_channel(dvs, "080")
self.create_port_channel_member("080", "Ethernet32")
self.set_interface_status(dvs, "PortChannel080", "up")
self.set_interface_status(dvs, "Ethernet32", "up")
# add ip address to port channel 080; create neighbor to port channel 080
self.add_ip_address("PortChannel080", "200.0.0.0/31")
self.add_neighbor("PortChannel080", "200.0.0.1", "12:10:08:06:04:02")
assert self.get_mirror_session_state(session)["status"] == "active"
# add route
self.add_route(dvs, "13.13.13.0/24", "200.0.0.1")
assert self.get_mirror_session_state(session)["status"] == "active"
# check monitor port
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
(status, fvs) = tbl.get(tbl.getKeys()[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet32"
if fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS":
assert fv[1] == "12:10:08:06:04:02"
# mirror session move round 2
# remove port channel member
self.remove_port_channel_member("080", "Ethernet32")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# mirror session move round 3
# create port channel member
self.create_port_channel_member("080", "Ethernet32")
assert self.get_mirror_session_state(session)["status"] == "active"
# check monitor port
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
(status, fvs) = tbl.get(tbl.getKeys()[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet32"
if fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS":
assert fv[1] == "12:10:08:06:04:02"
# mirror session move round 4
# remove route
self.remove_route(dvs, "13.13.13.0/24")
assert self.get_mirror_session_state(session)["status"] == "active"
port_oid = ""
# check monitor port
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
(status, fvs) = tbl.get(tbl.getKeys()[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT":
assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet64"
if fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS":
assert fv[1] == "02:04:06:08:10:12"
# remove neighbor; remove ip address to port channel 080
self.remove_neighbor("PortChannel080", "200.0.0.1")
self.remove_ip_address("PortChannel080", "200.0.0.0/31")
# bring down; remove port channel member; remove port channel
self.set_interface_status(dvs, "Ethernet32", "down")
self.set_interface_status(dvs, "PortChannel080", "down")
self.remove_port_channel_member("080", "Ethernet32")
self.remove_port_channel(dvs, "080")
assert self.get_mirror_session_state(session)["status"] == "active"
# remove route; remove neighbor; remove ip; bring down port
self.remove_route(dvs, "13.13.0.0/16")
self.remove_neighbor("Ethernet64", "100.0.0.1")
self.remove_ip_address("Ethernet64", "100.0.0.0/31")
self.set_interface_status(dvs, "Ethernet64", "down")
assert self.get_mirror_session_state(session)["status"] == "inactive"
# remove mirror session
self.remove_mirror_session(session)
def create_acl_table(self, table, interfaces):
tbl = swsscommon.Table(self.cdb, "ACL_TABLE")
fvs = swsscommon.FieldValuePairs([("policy_desc", "mirror_test"),
("type", "mirror"),
("ports", ",".join(interfaces))])
tbl.set(table, fvs)
time.sleep(1)
def remove_acl_table(self, table):
tbl = swsscommon.Table(self.cdb, "ACL_TABLE")
tbl._del(table)
time.sleep(1)
def create_mirror_acl_dscp_rule(self, table, rule, dscp, session, stage=None):
action_name = "mirror_action"
action_name_map = {
"ingress": "MIRROR_INGRESS_ACTION",
"egress": "MIRROR_EGRESS_ACTION"
}
if stage is not None: # else it should be treated as ingress by default in orchagent
assert stage in ('ingress', 'egress'), "invalid stage input {}".format(stage)
action_name = action_name_map[stage]
tbl = swsscommon.Table(self.cdb, "ACL_RULE")
fvs = swsscommon.FieldValuePairs([("priority", "1000"),
(action_name, session),
("DSCP", dscp)])
tbl.set(table + "|" + rule, fvs)
time.sleep(1)
def remove_mirror_acl_dscp_rule(self, table, rule):
tbl = swsscommon.Table(self.cdb, "ACL_RULE")
tbl._del(table + "|" + rule)
time.sleep(1)
def test_AclBindMirrorPerStage(self, dvs, testlog):
"""
        This test configures mirror rules while explicitly specifying
        the mirror action stage (ingress, egress) and verifies that the ASIC DB
        entry is set with the correct mirror action
"""
self.setup_db(dvs)
session = "MIRROR_SESSION"
acl_table = "MIRROR_TABLE"
acl_rule = "MIRROR_RULE"
# bring up port; assign ip; create neighbor; create route
self.set_interface_status(dvs, "Ethernet32", "up")
self.add_ip_address("Ethernet32", "20.0.0.0/31")
self.add_neighbor("Ethernet32", "20.0.0.1", "02:04:06:08:10:12")
self.add_route(dvs, "4.4.4.4", "20.0.0.1")
# create mirror session
self.create_mirror_session(session, "3.3.3.3", "4.4.4.4", "0x6558", "8", "100", "0")
assert self.get_mirror_session_state(session)["status"] == "active"
# assert mirror session in asic database
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
mirror_session_oid = tbl.getKeys()[0]
# create acl table
self.create_acl_table(acl_table, ["Ethernet0", "Ethernet4"])
for stage, asic_attr in (("ingress", "SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS"),
("egress", "SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_EGRESS")):
# create acl rule with dscp value 48
self.create_mirror_acl_dscp_rule(acl_table, acl_rule, "48", session, stage=stage)
# assert acl rule is created
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY")
rule_entries = [k for k in tbl.getKeys() if k not in dvs.asicdb.default_acl_entries]
assert len(rule_entries) == 1
(status, fvs) = tbl.get(rule_entries[0])
assert status == True
asic_attr_found = False
for fv in fvs:
if fv[0] == asic_attr:
asic_attr_found = True
assert asic_attr_found == True
# remove acl rule
self.remove_mirror_acl_dscp_rule(acl_table, acl_rule)
# remove acl table
self.remove_acl_table(acl_table)
# remove mirror session
self.remove_mirror_session(session)
# assert no mirror session in asic database
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 0
# remove route; remove neighbor; remove ip; bring down port
self.remove_route(dvs, "4.4.4.4")
self.remove_neighbor("Ethernet32", "20.0.0.1")
self.remove_ip_address("Ethernet32", "20.0.0.0/31")
self.set_interface_status(dvs, "Ethernet32", "down")
def _test_AclBindMirror(self, dvs, testlog, create_seq_test=False):
"""
        This test exercises an ACL rule associated with a mirror session using a DSCP match
        The DSCP value is tested both with and without a mask
"""
session = "MIRROR_SESSION"
acl_table = "MIRROR_TABLE"
acl_rule = "MIRROR_RULE"
# bring up port; assign ip; create neighbor; create route
self.set_interface_status(dvs, "Ethernet32", "up")
self.add_ip_address("Ethernet32", "20.0.0.0/31")
self.add_neighbor("Ethernet32", "20.0.0.1", "02:04:06:08:10:12")
if create_seq_test == False:
self.add_route(dvs, "4.4.4.4", "20.0.0.1")
# create mirror session
self.create_mirror_session(session, "3.3.3.3", "4.4.4.4", "0x6558", "8", "100", "0")
assert self.get_mirror_session_state(session)["status"] == ("active" if create_seq_test == False else "inactive")
# check mirror session in asic database
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == (1 if create_seq_test == False else 0)
if create_seq_test == False:
mirror_session_oid = tbl.getKeys()[0]
# create acl table
self.create_acl_table(acl_table, ["Ethernet0", "Ethernet4"])
# create acl rule with dscp value 48
self.create_mirror_acl_dscp_rule(acl_table, acl_rule, "48", session)
# acl rule creation check
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY")
rule_entries = [k for k in tbl.getKeys() if k not in dvs.asicdb.default_acl_entries]
assert len(rule_entries) == (1 if create_seq_test == False else 0)
if create_seq_test == True:
self.add_route(dvs, "4.4.4.4", "20.0.0.1")
assert self.get_mirror_session_state(session)["status"] == "active"
# assert mirror session in asic database
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 1
mirror_session_oid = tbl.getKeys()[0]
# assert acl rule is created
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY")
rule_entries = [k for k in tbl.getKeys() if k not in dvs.asicdb.default_acl_entries]
assert len(rule_entries) == 1
(status, fvs) = tbl.get(rule_entries[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_ACL_ENTRY_ATTR_FIELD_DSCP":
assert fv[1] == "48&mask:0x3f"
if fv[0] == "SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS":
assert fv[1] == "1:" + mirror_session_oid
# remove acl rule
self.remove_mirror_acl_dscp_rule(acl_table, acl_rule)
# create acl rule with dscp value 16/16
self.create_mirror_acl_dscp_rule(acl_table, acl_rule, "16/16", session)
# assert acl rule is created
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY")
rule_entries = [k for k in tbl.getKeys() if k not in dvs.asicdb.default_acl_entries]
assert len(rule_entries) == 1
(status, fvs) = tbl.get(rule_entries[0])
assert status == True
for fv in fvs:
if fv[0] == "SAI_ACL_ENTRY_ATTR_FIELD_DSCP":
assert fv[1] == "16&mask:0x10"
if fv[0] == "SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS":
assert fv[1] == "1:" + mirror_session_oid
# remove acl rule
self.remove_mirror_acl_dscp_rule(acl_table, acl_rule)
# remove acl table
self.remove_acl_table(acl_table)
# remove mirror session
self.remove_mirror_session(session)
# assert no mirror session in asic database
tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION")
assert len(tbl.getKeys()) == 0
# remove route; remove neighbor; remove ip; bring down port
self.remove_route(dvs, "4.4.4.4")
self.remove_neighbor("Ethernet32", "20.0.0.1")
self.remove_ip_address("Ethernet32", "20.0.0.0/31")
self.set_interface_status(dvs, "Ethernet32", "down")
def test_AclBindMirror(self, dvs, testlog):
self.setup_db(dvs)
self._test_AclBindMirror(dvs, testlog)
self._test_AclBindMirror(dvs, testlog, create_seq_test=True)
# Add a dummy always-pass test at the end as a workaround
# for an issue where Flaky, when it fails on the final test, invokes module tear-down before retrying
def test_nonflaky_dummy():
pass |
def test_MirrorDestMoveLag(self, dvs, testlog):
""" |
mod.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use address::Address;
use ipld_blockstore::BlockStore;
use serde::Serialize;
use std::error::Error;
use vm::ActorState;
/// Init actor address.
pub static ADDRESS: &Address = &actorv3::INIT_ACTOR_ADDR;
/// Init actor method.
pub type Method = actorv3::init::Method;
/// Init actor state.
#[derive(Serialize)]
#[serde(untagged)]
pub enum State {
V0(actorv0::init::State),
V2(actorv2::init::State),
V3(actorv3::init::State),
}
impl State {
pub fn load<BS>(store: &BS, actor: &ActorState) -> Result<State, Box<dyn Error>>
where
BS: BlockStore,
{
if actor.code == *actorv0::INIT_ACTOR_CODE_ID {
Ok(store
.get(&actor.state)?
.map(State::V0)
.ok_or("Actor state doesn't exist in store")?)
} else if actor.code == *actorv2::INIT_ACTOR_CODE_ID {
Ok(store
.get(&actor.state)?
.map(State::V2)
.ok_or("Actor state doesn't exist in store")?)
} else if actor.code == *actorv3::INIT_ACTOR_CODE_ID {
Ok(store
.get(&actor.state)?
.map(State::V3)
.ok_or("Actor state doesn't exist in store")?)
} else {
Err(format!("Unknown actor code {}", actor.code).into())
}
}
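
    // Usage sketch (hypothetical `store` and `actor` values):
    //   let state = State::load(&store, &actor)?;
    //   let network = state.into_network_name();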
/// Allocates a new ID address and stores a mapping of the argument address to it.
/// Returns the newly-allocated address.
pub fn map_address_to_new_id<BS: BlockStore>(
&mut self,
store: &BS,
addr: &Address,
) -> Result<Address, Box<dyn Error>> |
/// ResolveAddress resolves an address to an ID-address, if possible.
/// If the provided address is an ID address, it is returned as-is.
/// This means that mapped ID-addresses (which should only appear as values, not keys) and
/// singleton actor addresses (which are not in the map) pass through unchanged.
///
/// Returns an ID-address and `true` if the address was already an ID-address or was resolved
/// in the mapping.
/// Returns an undefined address and `false` if the address was not an ID-address and not found
/// in the mapping.
/// Returns an error only if state was inconsistent.
pub fn resolve_address<BS: BlockStore>(
&self,
store: &BS,
addr: &Address,
) -> Result<Option<Address>, Box<dyn Error>> {
match self {
State::V0(st) => st.resolve_address(store, addr),
State::V2(st) => st.resolve_address(store, addr),
State::V3(st) => st.resolve_address(store, addr),
}
}
pub fn into_network_name(self) -> String {
match self {
State::V0(st) => st.network_name,
State::V2(st) => st.network_name,
State::V3(st) => st.network_name,
}
}
}
| {
match self {
State::V0(st) => Ok(st.map_address_to_new_id(store, addr)?),
State::V2(st) => Ok(st.map_address_to_new_id(store, addr)?),
State::V3(st) => Ok(st.map_address_to_new_id(store, addr)?),
}
} |
model_rum_event.go | /*
* Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
* This product includes software developed at Datadog (https://www.datadoghq.com/).
* Copyright 2019-Present Datadog, Inc.
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package datadog
import (
"encoding/json"
)
// RUMEvent Object description of a RUM event after being processed and stored by Datadog.
type RUMEvent struct {
Attributes *RUMEventAttributes `json:"attributes,omitempty"`
// Unique ID of the event.
Id *string `json:"id,omitempty"`
Type *RUMEventType `json:"type,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
	UnparsedObject map[string]interface{} `json:"-"`
}
// NewRUMEvent instantiates a new RUMEvent object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewRUMEvent() *RUMEvent {
this := RUMEvent{}
var type_ RUMEventType = RUMEVENTTYPE_RUM
this.Type = &type_
return &this
}
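// Usage sketch: a freshly constructed event already carries the default RUM
// type; the id below is purely illustrative, not a value from the API.
//
//	ev := NewRUMEvent()
//	ev.SetId("example-id")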
// NewRUMEventWithDefaults instantiates a new RUMEvent object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func | () *RUMEvent {
this := RUMEvent{}
var type_ RUMEventType = RUMEVENTTYPE_RUM
this.Type = &type_
return &this
}
// GetAttributes returns the Attributes field value if set, zero value otherwise.
func (o *RUMEvent) GetAttributes() RUMEventAttributes {
if o == nil || o.Attributes == nil {
var ret RUMEventAttributes
return ret
}
return *o.Attributes
}
// GetAttributesOk returns a tuple with the Attributes field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RUMEvent) GetAttributesOk() (*RUMEventAttributes, bool) {
if o == nil || o.Attributes == nil {
return nil, false
}
return o.Attributes, true
}
// HasAttributes returns a boolean if a field has been set.
func (o *RUMEvent) HasAttributes() bool {
if o != nil && o.Attributes != nil {
return true
}
return false
}
// SetAttributes gets a reference to the given RUMEventAttributes and assigns it to the Attributes field.
func (o *RUMEvent) SetAttributes(v RUMEventAttributes) {
o.Attributes = &v
}
// GetId returns the Id field value if set, zero value otherwise.
func (o *RUMEvent) GetId() string {
if o == nil || o.Id == nil {
var ret string
return ret
}
return *o.Id
}
// GetIdOk returns a tuple with the Id field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RUMEvent) GetIdOk() (*string, bool) {
if o == nil || o.Id == nil {
return nil, false
}
return o.Id, true
}
// HasId returns a boolean if a field has been set.
func (o *RUMEvent) HasId() bool {
if o != nil && o.Id != nil {
return true
}
return false
}
// SetId gets a reference to the given string and assigns it to the Id field.
func (o *RUMEvent) SetId(v string) {
o.Id = &v
}
// GetType returns the Type field value if set, zero value otherwise.
func (o *RUMEvent) GetType() RUMEventType {
if o == nil || o.Type == nil {
var ret RUMEventType
return ret
}
return *o.Type
}
// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RUMEvent) GetTypeOk() (*RUMEventType, bool) {
if o == nil || o.Type == nil {
return nil, false
}
return o.Type, true
}
// HasType returns a boolean if a field has been set.
func (o *RUMEvent) HasType() bool {
if o != nil && o.Type != nil {
return true
}
return false
}
// SetType gets a reference to the given RUMEventType and assigns it to the Type field.
func (o *RUMEvent) SetType(v RUMEventType) {
o.Type = &v
}
func (o RUMEvent) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.UnparsedObject != nil {
return json.Marshal(o.UnparsedObject)
}
if o.Attributes != nil {
toSerialize["attributes"] = o.Attributes
}
if o.Id != nil {
toSerialize["id"] = o.Id
}
if o.Type != nil {
toSerialize["type"] = o.Type
}
return json.Marshal(toSerialize)
}
func (o *RUMEvent) UnmarshalJSON(bytes []byte) (err error) {
raw := map[string]interface{}{}
all := struct {
Attributes *RUMEventAttributes `json:"attributes,omitempty"`
Id *string `json:"id,omitempty"`
Type *RUMEventType `json:"type,omitempty"`
}{}
err = json.Unmarshal(bytes, &all)
if err != nil {
err = json.Unmarshal(bytes, &raw)
if err != nil {
return err
}
o.UnparsedObject = raw
return nil
}
if v := all.Type; v != nil && !v.IsValid() {
err = json.Unmarshal(bytes, &raw)
if err != nil {
return err
}
o.UnparsedObject = raw
return nil
}
o.Attributes = all.Attributes
o.Id = all.Id
o.Type = all.Type
return nil
}
| NewRUMEventWithDefaults |
main.go | package main
import (
"bufio"
"context"
"fmt"
"os"
"time"
cid "github.com/ipfs/go-cid"
datastore "github.com/ipfs/go-datastore"
ipfsaddr "github.com/ipfs/go-ipfs-addr"
floodsub "github.com/libp2p/go-floodsub"
libp2p "github.com/libp2p/go-libp2p"
dht "github.com/libp2p/go-libp2p-kad-dht"
peerstore "github.com/libp2p/go-libp2p-peerstore"
multihash "github.com/multiformats/go-multihash"
)
func main() | {
const TOPICNAME string = "HulusChannel"
ctx := context.Background()
// Set up a libp2p host.
host, err := libp2p.New(ctx, libp2p.Defaults)
if err != nil {
panic(err)
}
// Construct ourselves a pubsub instance using that libp2p host.
fsub, err := floodsub.NewFloodSub(ctx, host)
if err != nil {
panic(err)
}
// Using a DHT for discovery.
	// NewDHTClient returns the DHT instance directly; there is no error to check here.
	dht := dht.NewDHTClient(ctx, host, datastore.NewMapDatastore())
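	// Well-known public IPFS bootstrap nodes; connecting to any reachable
	// subset is enough to seed the DHT's routing table for discovery.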
bootstrapPeers := []string{
"/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
"/ip4/104.236.179.241/tcp/4001/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM",
"/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64",
"/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu",
"/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd",
}
fmt.Println("bootstrapping...")
for _, addr := range bootstrapPeers {
iaddr, _ := ipfsaddr.ParseString(addr)
pinfo, _ := peerstore.InfoFromP2pAddr(iaddr.Multiaddr())
if err := host.Connect(ctx, *pinfo); err != nil {
fmt.Println("bootstrapping to peer failed: ", err)
}
}
// Using the sha256 of our "topic" as our rendezvous value
c, _ := cid.NewPrefixV1(cid.Raw, multihash.SHA2_256).Sum([]byte(TOPICNAME))
// First, announce ourselves as participating in this topic
fmt.Println("announcing ourselves...")
tctx, _ := context.WithTimeout(ctx, time.Second*10)
if err := dht.Provide(tctx, c, true); err != nil {
panic(err)
}
// Now, look for others who have announced
fmt.Println("searching for other peers...")
tctx, _ = context.WithTimeout(ctx, time.Second*10)
peers, err := dht.FindProviders(tctx, c)
if err != nil {
panic(err)
}
fmt.Printf("Found %d peers!\n", len(peers))
// Now connect to them!
for _, p := range peers {
if p.ID == host.ID() {
// No sense connecting to ourselves
continue
}
tctx, _ := context.WithTimeout(ctx, time.Second*5)
if err := host.Connect(tctx, p); err != nil {
fmt.Println("failed to connect to peer: ", err)
}
}
fmt.Println("bootstrapping and discovery complete!")
sub, err := fsub.Subscribe(TOPICNAME)
if err != nil {
panic(err)
}
// Go and listen for messages from them, and print them to the screen
go func() {
for {
msg, err := sub.Next(ctx)
if err != nil {
panic(err)
}
fmt.Printf("%s: %s\n", msg.GetFrom(), string(msg.GetData()))
}
}()
// Now, wait for input from the user, and send that out!
fmt.Println("Type something and hit enter to send:")
scan := bufio.NewScanner(os.Stdin)
for scan.Scan() {
if err := fsub.Publish(TOPICNAME, scan.Bytes()); err != nil {
panic(err)
}
}
} |
|
main.rs | // Copyright 2021 Flat Bartender <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
#![cfg_attr(debug_assertions, windows_subsystem = "console")]
use std::sync::Arc;
mod discord;
mod executor;
mod gensokyo_radio;
mod pipeline;
mod ui;
use discord::{discord_main_loop, DiscordControl};
#[derive(Debug, Clone)]
pub enum PlayerMessage {
Play,
Pause,
VolumeChanged(u8),
AlbumArt(Option<Vec<u8>>),
SongInfo(gensokyo_radio::GRApiAnswer),
IncrementElapsed,
}
#[derive(PartialEq, Eq)]
enum PlayerStatus {
Playing,
Paused,
}
use iced::{widget, Application, Command, Element, Settings};
struct Player {
player_status: PlayerStatus,
player_tx: tokio::sync::mpsc::UnboundedSender<pipeline::PlayerControl>,
discord_tx: std::sync::mpsc::Sender<DiscordControl>,
api_client: Arc<gensokyo_radio::ApiClient>,
volume: u8,
album_image: Option<Vec<u8>>,
current_song_info: Option<gensokyo_radio::GRApiAnswer>,
play_pause_state: widget::button::State,
volume_slider_state: widget::slider::State,
}
impl Application for Player {
type Executor = executor::TokioExecutor;
type Message = PlayerMessage;
type Flags = ();
fn new(_flags: Self::Flags) -> (Self, Command<Self::Message>) {
let player_tx = pipeline::setup_pipeline();
let player_status = PlayerStatus::Paused;
let api_client = Arc::new(gensokyo_radio::ApiClient::new());
let fut_api_client = api_client.clone();
player_tx
.send(pipeline::PlayerControl::Volume(DEFAULT_VOLUME))
.expect("Failed to set initial volume");
let (discord_tx, discord_rx) = std::sync::mpsc::channel();
discord_main_loop(discord_rx);
let commands = vec![
Command::perform(
async move { fut_api_client.get_song_info().await },
PlayerMessage::SongInfo,
),
Command::perform(
async move { tokio::time::sleep(std::time::Duration::from_secs(1)).await },
|_| PlayerMessage::IncrementElapsed,
),
];
(
Player {
player_status,
player_tx,
discord_tx,
api_client,
album_image: None,
volume: DEFAULT_VOLUME,
current_song_info: None,
play_pause_state: widget::button::State::new(),
volume_slider_state: widget::slider::State::new(),
},
Command::batch(commands),
)
}
fn title(&self) -> String {
match self.current_song_info {
            None => String::from("Wan Player"),
Some(ref song_info) => format!(
"Wan Player | {} - {}",
song_info.songinfo.artist, song_info.songinfo.title
),
}
}
fn update(&mut self, message: Self::Message) -> Command<Self::Message> {
use pipeline::PlayerControl;
match message {
PlayerMessage::Play => {
self.player_tx
.send(PlayerControl::Play)
.expect("Failed to send play command to Player");
self.player_status = PlayerStatus::Playing;
Command::none()
}
PlayerMessage::Pause => {
self.player_tx
.send(PlayerControl::Pause)
.expect("Failed to send pause command to Player");
self.player_status = PlayerStatus::Paused;
Command::none()
}
PlayerMessage::VolumeChanged(volume) => |
PlayerMessage::AlbumArt(opt_art) => {
self.album_image = opt_art;
Command::none()
}
PlayerMessage::SongInfo(song_info) => {
self.current_song_info = Some(song_info.clone());
self.discord_tx
.send(DiscordControl::SongInfo(song_info.clone()))
.expect("Failed to send song info to discord");
let fut_api_client = self.api_client.clone();
Command::perform(
async move { fut_api_client.get_album_image(&song_info).await },
PlayerMessage::AlbumArt,
)
}
PlayerMessage::IncrementElapsed => {
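                // One tick per second: bump the local elapsed counter and,
                // when the track has just finished, queue a fresh song-info
                // fetch; either way, re-arm the one-second timer below.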
let mut commands = Vec::with_capacity(2);
if let Some(ref mut info) = self.current_song_info {
info.songtimes.played += 1;
if info.songtimes.played == info.songtimes.duration {
let fut_api_client = self.api_client.clone();
commands.push(Command::perform(
async move { fut_api_client.get_song_info().await },
PlayerMessage::SongInfo,
))
}
}
commands.push(Command::perform(
async move { tokio::time::sleep(std::time::Duration::from_secs(1)).await },
|_| PlayerMessage::IncrementElapsed,
));
Command::batch(commands)
}
}
}
fn view(&mut self) -> Element<Self::Message> {
let player = widget::Row::new();
let art_column = {
let album_image = ui::album_art_widget(&self.album_image);
let elapsed_row = ui::elapsed_widget(&self.current_song_info, self.volume);
let (svg_source, button_message) = match self.player_status {
PlayerStatus::Playing => (ui::PAUSE_SVG, PlayerMessage::Pause),
PlayerStatus::Paused => (ui::PLAY_SVG, PlayerMessage::Play),
};
let play_pause_svg = widget::Svg::new(widget::svg::Handle::from_memory(svg_source));
let play_pause = widget::Button::new(&mut self.play_pause_state, play_pause_svg)
.style(ui::PlayPauseStyle)
.on_press(button_message);
let volume_slider = widget::Slider::new(
&mut self.volume_slider_state,
0..=100,
self.volume,
PlayerMessage::VolumeChanged,
)
.style(ui::VolumeSliderStyle)
.step(1);
let controls = widget::Row::new()
.push(play_pause)
.push(volume_slider)
.spacing(8)
.align_items(iced::Align::Center);
let progress_bar = ui::progress_widget(&self.current_song_info);
widget::Column::new()
.push(album_image)
.push(progress_bar)
.push(elapsed_row)
.push(controls)
.max_width(200)
};
let info_panel = {
let type_column = widget::Column::new()
.push(widget::Space::new(iced::Length::Shrink, iced::Length::Units(48)))
.push(widget::Text::new("by").size(32).color([1.0, 1.0, 1.0, 0.5]))
.push(widget::Text::new("album").size(32).color([1.0, 1.0, 1.0, 0.5]))
.push(widget::Text::new("circle").size(32).color([1.0, 1.0, 1.0, 0.5]))
.push(widget::Text::new("year").size(32).color([1.0, 1.0, 1.0, 0.5]))
.align_items(iced::Align::End);
let value_column = widget::Column::new();
let value_column = if let Some(ref song_info) = self.current_song_info {
value_column
.push(widget::Text::new(&song_info.songinfo.title).size(48))
.push(widget::Text::new(&song_info.songinfo.artist).size(32))
.push(widget::Text::new(&song_info.songinfo.album).size(32))
.push(widget::Text::new(&song_info.songinfo.circle).size(32))
.push(widget::Text::new(&song_info.songinfo.year).size(32))
} else {
value_column.push(widget::Text::new("Fetching infos...").size(32))
};
widget::Row::new().push(type_column).push(value_column).spacing(8)
};
widget::Container::new(player.push(art_column).push(info_panel).spacing(8))
.style(ui::PlayerStyle)
.width(iced::Length::Fill)
.height(iced::Length::Fill)
.padding(8)
.into()
}
}
const DEFAULT_VOLUME: u8 = 10;
const FONT: &[u8] = include_bytes!("resources/NotoSansSC-Regular.otf");
#[tokio::main]
async fn main() {
let icon = image::load_from_memory(ui::ICON)
.expect("Failed to load icon")
.to_rgba8();
let icon_width = icon.width();
let icon_height = icon.height();
let settings = Settings {
default_font: Some(FONT),
window: iced::window::Settings {
size: (640, 294),
icon: iced::window::icon::Icon::from_rgba(icon.into_raw(), icon_width, icon_height).ok(),
..iced::window::Settings::default()
},
..Settings::default()
};
Player::run(settings).unwrap();
}
| {
self.player_tx
.send(PlayerControl::Volume(volume))
.expect("Failed to send volume command to Player");
self.volume = volume;
Command::none()
} |
cpuidle.chart.py | # -*- coding: utf-8 -*-
# Description: cpuidle netdata python.d module
# Author: Steven Noonan (tycho)
import glob
import os
import platform
import time
from base import SimpleService
import ctypes
syscall = ctypes.CDLL('libc.so.6').syscall
# default module values (can be overridden per job in `config`)
# update_every = 2
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
prefix = os.getenv('NETDATA_HOST_PREFIX', "")
if prefix.endswith('/'):
prefix = prefix[:-1]
self.sys_dir = prefix + "/sys/devices/system/cpu"
self.schedstat_path = prefix + "/proc/schedstat"
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = []
self.definitions = {}
self._orig_name = ""
self.assignment = {}
def __gettid(self):
# This is horrendous. We need the *thread id* (not the *process id*),
# but there's no Python standard library way of doing that. If you need
# to enable this module on a non-x86 machine type, you'll have to find
# the Linux syscall number for gettid() and add it to the dictionary
# below.
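        # For example, gettid appears to be syscall 178 on arm64 (aarch64) in
        # the generic syscall table -- verify against your kernel's unistd
        # headers first, since a wrong number would invoke a different syscall.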
syscalls = {
'i386': 224,
'x86_64': 186,
}
if platform.machine() not in syscalls:
return None
tid = syscall(syscalls[platform.machine()])
return tid
def __wake_cpus(self):
# Requires Python 3.3+. This will "tickle" each CPU to force it to
# update its idle counters.
if hasattr(os, 'sched_setaffinity'):
pid = self.__gettid()
save_affinity = os.sched_getaffinity(pid)
for idx in range(0, len(self.assignment)):
os.sched_setaffinity(pid, [idx])
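                # The set call above queues the migration; the get call below
                # forces this thread to actually run on CPU `idx`, which is
                # what refreshes that CPU's idle counters.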
os.sched_getaffinity(pid)
os.sched_setaffinity(pid, save_affinity)
def __read_schedstat(self):
cpus = {}
        with open(self.schedstat_path, 'r') as f:
            for line in f:
                if not line.startswith('cpu'):
                    continue
                line = line.rstrip().split()
                cpu = line[0]
                active_time = line[7]
                cpus[cpu] = int(active_time) // 1000
        return cpus
def _get_data(self):
results = {}
# This line is critical for the stats to update. If we don't "tickle"
# all the CPUs, then all the counters stop counting.
self.__wake_cpus()
# Use the kernel scheduler stats to determine how much time was spent
# in C0 (active).
schedstat = self.__read_schedstat()
for cpu, metrics in self.assignment.items():
update_time = schedstat[cpu]
results[cpu + '_active_time'] = update_time
for metric, path in metrics.items():
                with open(path, 'r') as f:
                    results[metric] = int(f.read())
return results
def check(self):
if self.__gettid() is None:
self.error("Cannot get thread ID. Stats would be completely broken.")
return False
self._orig_name = self.chart_name
for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
# ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
path_elem = path.split('/')
cpu = path_elem[-4]
state = path_elem[-2]
            with open(path, 'rt') as f:
                statename = f.read().rstrip()
orderid = '%s_cpuidle' % (cpu,)
if orderid not in self.definitions:
self.order.append(orderid)
active_name = '%s_active_time' % (cpu,)
self.definitions[orderid] = {
'options': [None, 'C-state residency', 'time%', 'cpuidle', None, 'stacked'],
'lines': [
[active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
],
}
self.assignment[cpu] = {}
defid = '%s_%s_time' % (orderid, state)
self.definitions[orderid]['lines'].append(
[defid, statename, 'percentage-of-incremental-row', 1, 1]
)
self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])
# Sort order by kernel-specified CPU index
self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
if len(self.definitions) == 0:
self.error("couldn't find cstate stats")
return False
return True
def create(self):
|
def update(self, interval):
self.chart_name = "cpu"
status = SimpleService.update(self, interval=interval)
self.chart_name = self._orig_name
return status
# vim: set ts=4 sts=4 sw=4 et:
| self.chart_name = "cpu"
status = SimpleService.create(self)
self.chart_name = self._orig_name
return status |
Earlybirds.d.ts | import * as React from 'react';
import { StyledIconProps } from '../../StyledIconBase';
export declare const Earlybirds: React.ForwardRefExoticComponent<Pick<StyledIconProps, "string" | "max" | "accumulate" | "origin" | "end" | "hanging" | "alphabetic" | "ideographic" | "media" | "style" | "title" | "clipPath" | "filter" | "mask" | "result" | "local" | "color" | "clip" | "size" | "fill" | "stroke" | "x" | "y" | "mathematical" | "additive" | "key" | "children" | "className" | "height" | "id" | "lang" | "method" | "min" | "name" | "target" | "type" | "width" | "role" | "tabIndex" | "accentHeight" | "alignmentBaseline" | "allowReorder" | "amplitude" | "arabicForm" | "ascent" | "attributeName" | "attributeType" | "autoReverse" | "azimuth" | "baseFrequency" | "baselineShift" | "baseProfile" | "bbox" | "begin" | "bias" | "by" | "calcMode" | "capHeight" | "clipPathUnits" | "clipRule" | "colorInterpolation" | "colorInterpolationFilters" | "colorProfile" | "colorRendering" | "contentScriptType" | "contentStyleType" | "cursor" | "cx" | "cy" | "d" | "decelerate" | "descent" | "diffuseConstant" | "direction" | "display" | "divisor" | "dominantBaseline" | "dur" | "dx" | "dy" | "edgeMode" | "elevation" | "enableBackground" | "exponent" | "externalResourcesRequired" | "fillOpacity" | "fillRule" | "filterRes" | "filterUnits" | "floodColor" | "floodOpacity" | "focusable" | "fontFamily" | "fontSize" | "fontSizeAdjust" | "fontStretch" | "fontStyle" | "fontVariant" | "fontWeight" | "format" | "from" | "fx" | "fy" | "g1" | "g2" | "glyphName" | "glyphOrientationHorizontal" | "glyphOrientationVertical" | "glyphRef" | "gradientTransform" | "gradientUnits" | "horizAdvX" | "horizOriginX" | "href" | "imageRendering" | "in2" | "in" | "intercept" | "k1" | "k2" | "k3" | "k4" | "k" | "kernelMatrix" | "kernelUnitLength" | "kerning" | "keyPoints" | "keySplines" | "keyTimes" | "lengthAdjust" | "letterSpacing" | "lightingColor" | "limitingConeAngle" | "markerEnd" | "markerHeight" | "markerMid" | "markerStart" | "markerUnits" | "markerWidth" | "maskContentUnits" | "maskUnits" | "mode" | "numOctaves" | "offset" | "opacity" | "operator" | "order" | "orient" | "orientation" | "overflow" | "overlinePosition" | "overlineThickness" | "paintOrder" | "panose1" | "pathLength" | "patternContentUnits" | "patternTransform" | "patternUnits" | "pointerEvents" | "points" | "pointsAtX" | "pointsAtY" | "pointsAtZ" | "preserveAlpha" | "preserveAspectRatio" | "primitiveUnits" | "r" | "radius" | "refX" | "refY" | "renderingIntent" | "repeatCount" | "repeatDur" | "requiredExtensions" | "requiredFeatures" | "restart" | "rotate" | "rx" | "ry" | "scale" | "seed" | "shapeRendering" | "slope" | "spacing" | "specularConstant" | "specularExponent" | "speed" | "spreadMethod" | "startOffset" | "stdDeviation" | "stemh" | "stemv" | "stitchTiles" | "stopColor" | "stopOpacity" | "strikethroughPosition" | "strikethroughThickness" | "strokeDasharray" | "strokeDashoffset" | "strokeLinecap" | "strokeLinejoin" | "strokeMiterlimit" | "strokeOpacity" | "strokeWidth" | "surfaceScale" | "systemLanguage" | "tableValues" | "targetX" | "targetY" | "textAnchor" | "textDecoration" | "textLength" | "textRendering" | "to" | "transform" | "u1" | "u2" | "underlinePosition" | "underlineThickness" | "unicode" | "unicodeBidi" | "unicodeRange" | "unitsPerEm" | "vAlphabetic" | "values" | "vectorEffect" | "version" | "vertAdvY" | "vertOriginX" | "vertOriginY" | "vHanging" | "vIdeographic" | "viewBox" | "viewTarget" | "visibility" | "vMathematical" | "widths" | "wordSpacing" | "writingMode" | "x1" | "x2" | "xChannelSelector" | "xHeight" | "xlinkActuate" | 
"xlinkArcrole" | "xlinkHref" | "xlinkRole" | "xlinkShow" | "xlinkTitle" | "xlinkType" | "xmlBase" | "xmlLang" | "xmlns" | "xmlnsXlink" | "xmlSpace" | "y1" | "y2" | "yChannelSelector" | "z" | "zoomAndPan" | "aria-activedescendant" | "aria-atomic" | "aria-autocomplete" | "aria-busy" | "aria-checked" | "aria-colcount" | "aria-colindex" | "aria-colspan" | "aria-controls" | "aria-current" | "aria-describedby" | "aria-details" | "aria-disabled" | "aria-dropeffect" | "aria-errormessage" | "aria-expanded" | "aria-flowto" | "aria-grabbed" | "aria-haspopup" | "aria-hidden" | "aria-invalid" | "aria-keyshortcuts" | "aria-label" | "aria-labelledby" | "aria-level" | "aria-live" | "aria-modal" | "aria-multiline" | "aria-multiselectable" | "aria-orientation" | "aria-owns" | "aria-placeholder" | "aria-posinset" | "aria-pressed" | "aria-readonly" | "aria-relevant" | "aria-required" | "aria-roledescription" | "aria-rowcount" | "aria-rowindex" | "aria-rowspan" | "aria-selected" | "aria-setsize" | "aria-sort" | "aria-valuemax" | "aria-valuemin" | "aria-valuenow" | "aria-valuetext" | "dangerouslySetInnerHTML" | "onCopy" | "onCopyCapture" | "onCut" | "onCutCapture" | "onPaste" | "onPasteCapture" | "onCompositionEnd" | "onCompositionEndCapture" | "onCompositionStart" | "onCompositionStartCapture" | "onCompositionUpdate" | "onCompositionUpdateCapture" | "onFocus" | "onFocusCapture" | "onBlur" | "onBlurCapture" | "onChange" | "onChangeCapture" | "onBeforeInput" | "onBeforeInputCapture" | "onInput" | "onInputCapture" | "onReset" | "onResetCapture" | "onSubmit" | "onSubmitCapture" | "onInvalid" | "onInvalidCapture" | "onLoad" | "onLoadCapture" | "onError" | "onErrorCapture" | "onKeyDown" | "onKeyDownCapture" | "onKeyPress" | "onKeyPressCapture" | "onKeyUp" | "onKeyUpCapture" | "onAbort" | "onAbortCapture" | "onCanPlay" | "onCanPlayCapture" | "onCanPlayThrough" | "onCanPlayThroughCapture" | "onDurationChange" | "onDurationChangeCapture" | "onEmptied" | "onEmptiedCapture" | "onEncrypted" | "onEncryptedCapture" | "onEnded" | "onEndedCapture" | "onLoadedData" | "onLoadedDataCapture" | "onLoadedMetadata" | "onLoadedMetadataCapture" | "onLoadStart" | "onLoadStartCapture" | "onPause" | "onPauseCapture" | "onPlay" | "onPlayCapture" | "onPlaying" | "onPlayingCapture" | "onProgress" | "onProgressCapture" | "onRateChange" | "onRateChangeCapture" | "onSeeked" | "onSeekedCapture" | "onSeeking" | "onSeekingCapture" | "onStalled" | "onStalledCapture" | "onSuspend" | "onSuspendCapture" | "onTimeUpdate" | "onTimeUpdateCapture" | "onVolumeChange" | "onVolumeChangeCapture" | "onWaiting" | "onWaitingCapture" | "onAuxClick" | "onAuxClickCapture" | "onClick" | "onClickCapture" | "onContextMenu" | "onContextMenuCapture" | "onDoubleClick" | "onDoubleClickCapture" | "onDrag" | "onDragCapture" | "onDragEnd" | "onDragEndCapture" | "onDragEnter" | "onDragEnterCapture" | "onDragExit" | "onDragExitCapture" | "onDragLeave" | "onDragLeaveCapture" | "onDragOver" | "onDragOverCapture" | "onDragStart" | "onDragStartCapture" | "onDrop" | "onDropCapture" | "onMouseDown" | "onMouseDownCapture" | "onMouseEnter" | "onMouseLeave" | "onMouseMove" | "onMouseMoveCapture" | "onMouseOut" | "onMouseOutCapture" | "onMouseOver" | "onMouseOverCapture" | "onMouseUp" | "onMouseUpCapture" | "onSelect" | "onSelectCapture" | "onTouchCancel" | "onTouchCancelCapture" | "onTouchEnd" | "onTouchEndCapture" | "onTouchMove" | "onTouchMoveCapture" | "onTouchStart" | "onTouchStartCapture" | "onPointerDown" | "onPointerDownCapture" | "onPointerMove" | "onPointerMoveCapture" | 
"onPointerUp" | "onPointerUpCapture" | "onPointerCancel" | "onPointerCancelCapture" | "onPointerEnter" | "onPointerEnterCapture" | "onPointerLeave" | "onPointerLeaveCapture" | "onPointerOver" | "onPointerOverCapture" | "onPointerOut" | "onPointerOutCapture" | "onGotPointerCapture" | "onGotPointerCaptureCapture" | "onLostPointerCapture" | "onLostPointerCaptureCapture" | "onScroll" | "onScrollCapture" | "onWheel" | "onWheelCapture" | "onAnimationStart" | "onAnimationStartCapture" | "onAnimationEnd" | "onAnimationEndCapture" | "onAnimationIteration" | "onAnimationIterationCapture" | "onTransitionEnd" | "onTransitionEndCapture"> & React.RefAttributes<SVGSVGElement>>; | export declare const EarlybirdsDimensions: {
height: undefined;
width: undefined;
}; |
|
0002_populate_weights.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django_countries import countries
def populate_weights(apps, schema_editor):
Weights = apps.get_model("reports", "Weights")
db_alias = schema_editor.connection.alias
for item in COUNTRY_WEIGHTS:
country = item['Country']
item.pop('Country')
item.pop('Region')
        for media_type, weight in item.items():
            if media_type not in ('Country', 'Region'):
w = Weights.objects.using(db_alias).create(
country=country,
media_type=media_type,
weight=weight)
w.save()
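# Reversing this migration leaves the seeded rows in place; a stricter
# reverse would delete the created Weights objects instead.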
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
|
COUNTRY_WEIGHTS= [{'Country': 'AF',
'Internet': '0.37',
'Print': '0.33',
'Radio': '0.93',
'Region': 'Asia',
'Television': '0.93',
'Twitter': 1},
{'Country': 'AL',
'Internet': '0.36',
'Print': '1.02',
'Radio': '0.30',
'Region': 'Europe',
'Television': '0.30',
'Twitter': 1},
{'Country': 'AG',
'Internet': '0.08',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'AR',
'Internet': '1.34',
'Print': '0.74',
'Radio': '1.07',
'Region': 'Latin America',
'Television': '1.07',
'Twitter': 1},
{'Country': 'AM',
'Internet': '0.31',
'Print': '1.02',
'Radio': '0.29',
'Region': 'Europe',
'Television': '0.29',
'Twitter': 1},
{'Country': 'AU',
'Internet': '1.23',
'Print': '0.98',
'Radio': '0.81',
'Region': 'Pacific Islands',
'Television': '0.81',
'Twitter': 1},
{'Country': 'AT',
'Internet': '0.72',
'Print': '0.58',
'Radio': '0.48',
'Region': 'Europe',
'Television': '0.48',
'Twitter': 1},
{'Country': 'BS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.10',
'Region': 'Caribbean',
'Television': '0.10',
'Twitter': 1},
{'Country': 'BD',
'Internet': '0.88',
'Print': '3.63',
'Radio': '2.09',
'Region': 'Asia',
'Television': '2.09',
'Twitter': 1},
{'Country': 'BB',
'Internet': '0.13',
'Print': '0.13',
'Radio': '0.09',
'Region': 'Caribbean',
'Television': '0.09',
'Twitter': 1},
{'Country': 'BY',
'Internet': '0.59',
'Print': '0.47',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'BE',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'BZ',
'Internet': '0.08',
'Print': '0.68',
'Radio': '0.10',
'Region': 'Caribbean',
'Television': '0.10',
'Twitter': 1},
{'Country': 'BJ',
'Internet': '0.18',
'Print': '0.03',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'BT',
'Internet': '0.12',
'Print': '0.68',
'Radio': '0.14',
'Region': 'Asia',
'Television': '0.14',
'Twitter': 1},
{'Country': 'BO',
'Internet': '0.53',
'Print': '0.42',
'Radio': '0.55',
'Region': 'Latin America',
'Television': '0.55',
'Twitter': 1},
{'Country': 'BA',
'Internet': '0.43',
'Print': '0.68',
'Radio': '0.32',
'Region': 'Europe',
'Television': '0.32',
'Twitter': 1},
{'Country': 'BW',
'Internet': '0.14',
'Print': '0.18',
'Radio': '0.24',
'Region': 'Africa',
'Television': '0.24',
'Twitter': 1},
{'Country': 'BR',
'Internet': '2.78',
'Print': '1.64',
'Radio': '2.35',
'Region': 'Latin America',
'Television': '2.35',
'Twitter': 1},
{'Country': 'BG',
'Internet': '0.54',
'Print': '0.41',
'Radio': '0.44',
'Region': 'Europe',
'Television': '0.44',
'Twitter': 1},
{'Country': 'BF',
'Internet': '0.23',
'Print': '0.10',
'Radio': '0.69',
'Region': 'Africa',
'Television': '0.69',
'Twitter': 1},
{'Country': 'BI',
'Internet': '0.10',
'Print': '0.10',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'CM',
'Internet': '0.33',
'Print': '0.17',
'Radio': '0.79',
'Region': 'Africa',
'Television': '0.79',
'Twitter': 1},
{'Country': 'CA',
'Internet': '1.54',
'Print': '1.31',
'Radio': '0.99',
'Region': 'North America',
'Television': '0.99',
'Twitter': 1},
{'Country': 'CV',
'Internet': '0.12',
'Print': '0.18',
'Radio': '0.12',
'Region': 'Africa',
'Television': '0.12',
'Twitter': 1},
{'Country': 'CF',
'Internet': '0.11',
'Print': '0.68',
'Radio': '0.36',
'Region': 'Africa',
'Television': '0.36',
'Twitter': 1},
{'Country': 'TD',
'Internet': '0.15',
'Print': '0.00',
'Radio': '0.60',
'Region': 'Africa',
'Television': '0.60',
'Twitter': 1},
{'Country': 'CL',
'Internet': '0.92',
'Print': '0.37',
'Radio': '0.70',
'Region': 'Latin America',
'Television': '0.70',
'Twitter': 1},
{'Country': 'CN',
'Internet': '6.79',
'Print': '6.23',
'Radio': '6.18',
'Region': 'Asia',
'Television': '6.18',
'Twitter': 1},
{'Country': 'CO',
'Internet': '1.36',
'Print': '0.66',
'Radio': '1.16',
'Region': 'Latin America',
'Television': '1.16',
'Twitter': 1},
{'Country': 'KM',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.14',
'Region': 'Africa',
'Television': '0.14',
'Twitter': 1},
{'Country': 'CD',
'Internet': '0.08',
'Print': '0.28',
'Radio': '0.35',
'Region': 'Africa',
'Television': '0.35',
'Twitter': 1},
{'Country': 'CG',
'Internet': '0.33',
'Print': '0.11',
'Radio': '0.36',
'Region': 'Africa',
'Television': '0.36',
'Twitter': 1},
{'Country': 'CR',
'Internet': '0.42',
'Print': '0.34',
'Radio': '0.37',
'Region': 'Latin America',
'Television': '0.37',
'Twitter': 1},
{'Country': 'HR',
'Internet': '0.45',
'Print': '0.41',
'Radio': '0.34',
'Region': 'Europe',
'Television': '0.34',
'Twitter': 1},
{'Country': 'CU',
'Internet': '0.47',
'Print': '0.12',
'Radio': '0.56',
'Region': 'Caribbean',
'Television': '0.56',
'Twitter': 1},
{'Country': 'CY',
'Internet': '0.23',
'Print': '0.13',
'Radio': '0.18',
'Region': 'Middle East',
'Television': '0.18',
'Twitter': 1},
{'Country': 'DK',
'Internet': '0.50',
'Print': '0.74',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'DO',
'Internet': '0.60',
'Print': '0.68',
'Radio': '0.54',
'Region': 'Caribbean',
'Television': '0.54',
'Twitter': 1},
{'Country': 'EC',
'Internet': '0.66',
'Print': '0.72',
'Radio': '0.66',
'Region': 'Latin America',
'Television': '0.66',
'Twitter': 1},
{'Country': 'EG',
'Internet': '1.70',
'Print': '1.43',
'Radio': '1.51',
'Region': 'Middle East',
'Television': '1.51',
'Twitter': 1},
{'Country': 'SV',
'Internet': '0.35',
'Print': '0.32',
'Radio': '0.42',
'Region': 'Latin America',
'Television': '0.42',
'Twitter': 1},
{'Country': 'GQ',
'Internet': '0.09',
'Print': '0.68',
'Radio': '0.15',
'Region': 'Africa',
'Television': '0.15',
'Twitter': 1},
{'Country': 'EE',
'Internet': '0.27',
'Print': '0.27',
'Radio': '0.19',
'Region': 'Europe',
'Television': '0.19',
'Twitter': 1},
{'Country': 'ET',
'Internet': '0.34',
'Print': '0.39',
'Radio': '1.63',
'Region': 'Africa',
'Television': '1.63',
'Twitter': 1},
{'Country': 'FJ',
'Internet': '0.15',
'Print': '0.12',
'Radio': '0.16',
'Region': 'Pacific Islands',
'Television': '0.16',
'Twitter': 1},
{'Country': 'FI',
'Internet': '0.61',
'Print': '0.03',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'FR',
'Internet': '1.99',
'Print': '1.69',
'Radio': '1.33',
'Region': 'Europe',
'Television': '1.33',
'Twitter': 1},
{'Country': 'GA',
'Internet': '0.11',
'Print': '0.58',
'Radio': '0.22',
'Region': 'Africa',
'Television': '0.22',
'Twitter': 1},
{'Country': 'GM',
'Internet': '0.14',
'Print': '0.04',
'Radio': '0.23',
'Region': 'Africa',
'Television': '0.23',
'Twitter': 1},
{'Country': 'GE',
'Internet': '0.40',
'Print': '1.02',
'Radio': '0.34',
'Region': 'Europe',
'Television': '0.34',
'Twitter': 1},
{'Country': 'DE',
'Internet': '2.27',
'Print': '2.50',
'Radio': '1.51',
'Region': 'Europe',
'Television': '1.51',
'Twitter': 1},
{'Country': 'GH',
'Internet': '0.61',
'Print': '0.39',
'Radio': '0.85',
'Region': 'Africa',
'Television': '0.85',
'Twitter': 1},
{'Country': 'GR',
'Internet': '0.68',
'Print': '0.44',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'GD',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'GT',
'Internet': '0.44',
'Print': '0.38',
'Radio': '0.66',
'Region': 'Latin America',
'Television': '0.66',
'Twitter': 1},
{'Country': 'GW',
'Internet': '0.06',
'Print': '0.68',
'Radio': '0.22',
'Region': 'Africa',
'Television': '0.22',
'Twitter': 1},
{'Country': 'GN',
'Internet': '0.68',
'Print': '1.67',
'Radio': '0.56',
'Region': 'Africa',
'Television': '0.56',
'Twitter': 1},
{'Country': 'GY',
'Internet': '0.15',
'Print': '0.15',
'Radio': '0.15',
'Region': 'Caribbean',
'Television': '0.15',
'Twitter': 1},
{'Country': 'HT',
'Internet': '0.30',
'Print': '0.17',
'Radio': '0.54',
'Region': 'Caribbean',
'Television': '0.54',
'Twitter': 1},
{'Country': 'HU',
'Internet': '0.73',
'Print': '0.68',
'Radio': '0.52',
'Region': 'Europe',
'Television': '0.52',
'Twitter': 1},
{'Country': 'IS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.10',
'Region': 'Europe',
'Television': '0.10',
'Twitter': 1},
{'Country': 'IN',
'Internet': '4.18',
'Print': '5.72',
'Radio': '5.90',
'Region': 'Asia',
'Television': '5.90',
'Twitter': 1},
{'Country': 'IE',
'Internet': '0.52',
'Print': '0.18',
'Radio': '0.36',
'Region': 'Europe',
'Television': '0.36',
'Twitter': 1},
{'Country': 'IL',
'Internet': '0.65',
'Print': '0.89',
'Radio': '0.46',
'Region': 'Middle East',
'Television': '0.46',
'Twitter': 1},
{'Country': 'IT',
'Internet': '1.62',
'Print': '1.51',
'Radio': '1.29',
'Region': 'Europe',
'Television': '1.29',
'Twitter': 1},
{'Country': 'CI',
'Internet': '0.73',
'Print': '1.02',
'Radio': '0.79',
'Region': 'Africa',
'Television': '0.79',
'Twitter': 1},
{'Country': 'JM',
'Internet': '0.32',
'Print': '0.27',
'Radio': '0.28',
'Region': 'Caribbean',
'Television': '0.28',
'Twitter': 1},
{'Country': 'JP',
'Internet': '2.80',
'Print': '5.27',
'Radio': '1.87',
'Region': 'Asia',
'Television': '1.87',
'Twitter': 1},
{'Country': 'KZ',
'Internet': '0.84',
'Print': '0.58',
'Radio': '0.68',
'Region': 'Europe',
'Television': '0.68',
'Twitter': 1},
{'Country': 'KE',
'Internet': '1.10',
'Print': '0.44',
'Radio': '1.12',
'Region': 'Africa',
'Television': '1.12',
'Twitter': 1},
{'Country': 'KG',
'Internet': '0.31',
'Print': '0.05',
'Radio': '0.39',
'Region': 'Asia',
'Television': '0.39',
'Twitter': 1},
{'Country': 'LB',
'Internet': '0.49',
'Print': '0.30',
'Radio': '0.37',
'Region': 'Middle East',
'Television': '0.37',
'Twitter': 1},
{'Country': 'LS',
'Internet': '0.09',
'Print': '0.08',
'Radio': '0.24',
'Region': 'Africa',
'Television': '0.24',
'Twitter': 1},
{'Country': 'LR',
'Internet': '0.12',
'Print': '0.13',
'Radio': '0.35',
'Region': 'Africa',
'Television': '0.35',
'Twitter': 1},
{'Country': 'LU',
'Internet': '0.19',
'Print': '0.18',
'Radio': '0.12',
'Region': 'Europe',
'Television': '0.12',
'Twitter': 1},
{'Country': 'MK',
'Internet': '0.22',
'Print': '0.58',
'Radio': '0.24',
'Region': 'Europe',
'Television': '0.24',
'Twitter': 1},
{'Country': 'MG',
'Internet': '1.11',
'Print': '0.19',
'Radio': '0.80',
'Region': 'Africa',
'Television': '0.80',
'Twitter': 1},
{'Country': 'MW',
'Internet': '0.93',
'Print': '0.11',
'Radio': '0.68',
'Region': 'Africa',
'Television': '0.68',
'Twitter': 1},
{'Country': 'MY',
'Internet': '0.22',
'Print': '1.07',
'Radio': '0.91',
'Region': 'Asia',
'Television': '0.91',
'Twitter': 1},
{'Country': 'ML',
'Internet': '0.92',
'Print': '0.68',
'Radio': '0.66',
'Region': 'Africa',
'Television': '0.66',
'Twitter': 1},
{'Country': 'MT',
'Internet': '0.11',
'Print': '0.13',
'Radio': '0.11',
'Region': 'Europe',
'Television': '0.11',
'Twitter': 1},
{'Country': 'MR',
'Internet': '0.18',
'Print': '0.68',
'Radio': '0.33',
'Region': 'Africa',
'Television': '0.33',
'Twitter': 1},
{'Country': 'MU',
'Internet': '0.07',
'Print': '0.62',
'Radio': '0.19',
'Region': 'Africa',
'Television': '0.19',
'Twitter': 1},
{'Country': 'MX',
'Internet': '1.91',
'Print': '0.06',
'Radio': '1.84',
'Region': 'Latin America',
'Television': '1.84',
'Twitter': 1},
{'Country': 'MD',
'Internet': '0.33',
'Print': '0.16',
'Radio': '0.31',
'Region': 'Europe',
'Television': '0.31',
'Twitter': 1},
{'Country': 'MN',
'Internet': '0.19',
'Print': '0.14',
'Radio': '0.28',
'Region': 'Asia',
'Television': '0.28',
'Twitter': 1},
{'Country': 'ME',
'Internet': '0.16',
'Print': '0.00',
'Radio': '0.13',
'Region': 'Europe',
'Television': '0.13',
'Twitter': 1},
{'Country': 'MA',
'Internet': '1.20',
'Print': '0.38',
'Radio': '0.96',
'Region': 'Middle East',
'Television': '0.96',
'Twitter': 1},
{'Country': 'NA',
'Internet': '0.16',
'Print': '0.15',
'Radio': '0.25',
'Region': 'Africa',
'Television': '0.25',
'Twitter': 1},
{'Country': 'NP',
'Internet': '0.49',
'Print': '0.30',
'Radio': '0.88',
'Region': 'Asia',
'Television': '0.88',
'Twitter': 1},
{'Country': 'NL',
'Internet': '1.08',
'Print': '1.19',
'Radio': '0.68',
'Region': 'Europe',
'Television': '0.68',
'Twitter': 1},
{'Country': 'NZ',
'Internet': '0.55',
'Print': '0.68',
'Radio': '0.35',
'Region': 'Pacific Islands',
'Television': '0.35',
'Twitter': 1},
{'Country': 'NI',
'Internet': '0.25',
'Print': '0.26',
'Radio': '0.41',
'Region': 'Latin America',
'Television': '0.41',
'Twitter': 1},
{'Country': 'NE',
'Internet': '0.15',
'Print': '0.08',
'Radio': '0.71',
'Region': 'Africa',
'Television': '0.71',
'Twitter': 1},
{'Country': 'NG',
'Internet': '2.19',
'Print': '1.19',
'Radio': '2.21',
'Region': 'Africa',
'Television': '2.21',
'Twitter': 1},
{'Country': 'NO',
'Internet': '0.59',
'Print': '0.83',
'Radio': '0.37',
'Region': 'Europe',
'Television': '0.37',
'Twitter': 1},
{'Country': 'PK',
'Internet': '1.20',
'Print': '0.06',
'Radio': '2.25',
'Region': 'Asia',
'Television': '2.25',
'Twitter': 1},
{'Country': 'PS',
'Internet': '0.54',
'Print': '0.00',
'Radio': '0.59',
'Region': 'Middle East',
'Television': '0.59',
'Twitter': 1},
{'Country': 'PY',
'Internet': '0.38',
'Print': '0.31',
'Radio': '0.44',
'Region': 'Latin America',
'Television': '0.44',
'Twitter': 1},
{'Country': 'PE',
'Internet': '0.95',
'Print': '1.92',
'Radio': '0.92',
'Region': 'Latin America',
'Television': '0.92',
'Twitter': 1},
{'Country': 'PH',
'Internet': '1.68',
'Print': '1.65',
'Radio': '1.66',
'Region': 'Asia',
'Television': '1.66',
'Twitter': 1},
{'Country': 'PL',
'Internet': '1.36',
'Print': '1.11',
'Radio': '1.02',
'Region': 'Europe',
'Television': '1.02',
'Twitter': 1},
{'Country': 'PT',
'Internet': '0.71',
'Print': '0.63',
'Radio': '0.54',
'Region': 'Europe',
'Television': '0.54',
'Twitter': 1},
{'Country': 'PR',
'Internet': '0.38',
'Print': '0.53',
'Radio': '0.32',
'Region': 'Latin America',
'Television': '0.32',
'Twitter': 1},
{'Country': 'RO',
'Internet': '0.90',
'Print': '0.65',
'Radio': '0.77',
'Region': 'Europe',
'Television': '0.77',
'Twitter': 1},
{'Country': 'WS',
'Internet': '0.04',
'Print': '0.68',
'Radio': '0.07',
'Region': 'Pacific Islands',
'Television': '0.07',
'Twitter': 1},
{'Country': 'SN',
'Internet': '0.48',
'Print': '0.21',
'Radio': '0.63',
'Region': 'Africa',
'Television': '0.63',
'Twitter': 1},
{'Country': 'RS',
'Internet': '0.58',
'Print': '0.58',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'SL',
'Internet': '0.08',
'Print': '0.07',
'Radio': '0.41',
'Region': 'Africa',
'Television': '0.41',
'Twitter': 1},
{'Country': 'SK',
'Internet': '0.57',
'Print': '0.68',
'Radio': '0.39',
'Region': 'Europe',
'Television': '0.39',
'Twitter': 1},
{'Country': 'SI',
'Internet': '0.33',
'Print': '0.31',
'Radio': '0.24',
'Region': 'Europe',
'Television': '0.24',
'Twitter': 1},
{'Country': 'SB',
'Internet': '0.06',
'Print': '0.04',
'Radio': '0.13',
'Region': 'Pacific Islands',
'Television': '0.13',
'Twitter': 1},
{'Country': 'SO',
'Internet': '0.11',
'Print': '0.68',
'Radio': '0.54',
'Region': 'Africa',
'Television': '0.54',
'Twitter': 1},
{'Country': 'ZA',
'Internet': '1.34',
'Print': '0.76',
'Radio': '1.21',
'Region': 'Africa',
'Television': '1.21',
'Twitter': 1},
{'Country': 'KR',
'Internet': '1.80',
'Print': '1.67',
'Radio': '1.17',
'Region': 'Asia',
'Television': '1.17',
'Twitter': 1},
{'Country': 'ES',
'Internet': '1.59',
'Print': '1.35',
'Radio': '1.14',
'Region': 'Europe',
'Television': '1.14',
'Twitter': 1},
{'Country': 'LC',
'Internet': '0.06',
'Print': '0.18',
'Radio': '0.07',
'Region': 'Caribbean',
'Television': '0.07',
'Twitter': 1},
{'Country': 'VC',
'Internet': '0.05',
'Print': '0.68',
'Radio': '0.05',
'Region': 'Caribbean',
'Television': '0.05',
'Twitter': 1},
{'Country': 'SD',
'Internet': '0.82',
'Print': '0.60',
'Radio': '1.03',
'Region': 'Africa',
'Television': '1.03',
'Twitter': 1},
{'Country': 'SS',
'Internet': '0.15',
'Print': '0.18',
'Radio': '0.48',
'Region': 'Africa',
'Television': '0.48',
'Twitter': 1},
{'Country': 'SR',
'Internet': '0.12',
'Print': '0.12',
'Radio': '0.13',
'Region': 'Caribbean',
'Television': '0.13',
'Twitter': 1},
{'Country': 'SZ',
'Internet': '0.15',
'Print': '0.10',
'Radio': '0.19',
'Region': 'Africa',
'Television': '0.19',
'Twitter': 1},
{'Country': 'SE',
'Internet': '0.78',
'Print': '1.11',
'Radio': '0.51',
'Region': 'Europe',
'Television': '0.51',
'Twitter': 1},
{'Country': 'CH',
'Internet': '0.72',
'Print': '0.94',
'Radio': '0.47',
'Region': 'Europe',
'Television': '0.47',
'Twitter': 1},
{'Country': 'TW',
'Internet': '1.00',
'Print': '0.68',
'Radio': '0.80',
'Region': 'Asia',
'Television': '0.80',
'Twitter': 1},
{'Country': 'TZ',
'Internet': '0.74',
'Print': '0.35',
'Radio': '1.18',
'Region': 'Africa',
'Television': '1.18',
'Twitter': 1},
{'Country': 'TG',
'Internet': '0.15',
'Print': '0.07',
'Radio': '0.44',
'Region': 'Africa',
'Television': '0.44',
'Twitter': 1},
{'Country': 'TO',
'Internet': '0.05',
'Print': '0.05',
'Radio': '0.05',
'Region': 'Pacific Islands',
'Television': '0.05',
'Twitter': 1},
{'Country': 'TT',
'Internet': '0.25',
'Print': '0.18',
'Radio': '0.19',
'Region': 'Caribbean',
'Television': '0.19',
'Twitter': 1},
{'Country': 'TN',
'Internet': '0.60',
'Print': '0.31',
'Radio': '0.55',
'Region': 'Middle East',
'Television': '0.55',
'Twitter': 1},
{'Country': 'TR',
'Internet': '1.59',
'Print': '0.94',
'Radio': '1.44',
'Region': 'Europe',
'Television': '1.44',
'Twitter': 1},
{'Country': 'UG',
'Internet': '0.68',
'Print': '0.16',
'Radio': '1.03',
'Region': 'Africa',
'Television': '1.03',
'Twitter': 1},
{'Country': 'GB',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'US',
'Internet': '4.48',
'Print': '4.43',
'Radio': '2.98',
'Region': 'North America',
'Television': '2.98',
'Twitter': 1},
{'Country': 'UY',
'Internet': '0.38',
'Print': '0.56',
'Radio': '0.31',
'Region': 'Latin America',
'Television': '0.31',
'Twitter': 1},
{'Country': 'VU',
'Internet': '0.05',
'Print': '0.58',
'Radio': '0.08',
'Region': 'Asia',
'Television': '0.08',
'Twitter': 1},
{'Country': 'VE',
'Internet': '1.02',
'Print': '1.01',
'Radio': '0.92',
'Region': 'Latin America',
'Television': '0.92',
'Twitter': 1},
{'Country': 'VN',
'Internet': '1.69',
'Print': '0.52',
'Radio': '1.59',
'Region': 'Asia',
'Television': '1.59',
'Twitter': 1},
{'Country': 'ZM',
'Internet': '0.41',
'Print': '0.15',
'Radio': '0.64',
'Region': 'Africa',
'Television': '0.64',
'Twitter': 1},
{'Country': 'ZW',
'Internet': '0.45',
'Print': '0.30',
'Radio': '0.63',
'Region': 'Africa',
'Television': '0.63',
'Twitter': 1},
{'Country': 'EN',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'WL',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'SQ',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'EN',
'Internet': '2.02',
'Print': '2.23',
'Radio': '1.32',
'Region': 'Europe',
'Television': '1.32',
'Twitter': 1},
{'Country': 'B1',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1},
{'Country': 'B2',
'Internet': '0.82',
'Print': '0.70',
'Radio': '0.55',
'Region': 'Europe',
'Television': '0.55',
'Twitter': 1}]
| dependencies = [
('reports', '0001_initial'),
]
operations = [
migrations.RunPython(
populate_weights,
backwards,
),
] |
bench.rs | use criterion::{black_box, criterion_group, criterion_main, Criterion, SamplingMode, Throughput};
use lzfse_rust::{self, LzfseDecoder, LzfseEncoder, LzfseRingDecoder, LzfseRingEncoder};
use std::time::Duration;
const SAMPLE_SIZE: usize = 20;
const MEASUREMENT_TIME: Duration = Duration::from_secs(20);
// Snappy benchmarks.
const CORPUS_HTML: &[u8] = include_bytes!("../../data/snappy/html.lzfse");
const CORPUS_URLS_10K: &[u8] = include_bytes!("../../data/snappy/urls.10K.lzfse");
const CORPUS_FIREWORKS: &[u8] = include_bytes!("../../data/snappy/fireworks.jpeg.lzfse");
const CORPUS_PAPER_100K: &[u8] = include_bytes!("../../data/snappy/paper-100k.pdf.lzfse");
const CORPUS_HTML_X_4: &[u8] = include_bytes!("../../data/snappy/html_x_4.lzfse");
const CORPUS_ALICE29: &[u8] = include_bytes!("../../data/snappy/alice29.txt.lzfse");
const CORPUS_ASYOULIK: &[u8] = include_bytes!("../../data/snappy/asyoulik.txt.lzfse");
const CORPUS_LCET10: &[u8] = include_bytes!("../../data/snappy/lcet10.txt.lzfse");
const CORPUS_PLRABN12: &[u8] = include_bytes!("../../data/snappy/plrabn12.txt.lzfse");
const CORPUS_GEOPROTO: &[u8] = include_bytes!("../../data/snappy/geo.protodata.lzfse");
const CORPUS_KPPKN: &[u8] = include_bytes!("../../data/snappy/kppkn.gtb.lzfse");
// Synthetic benchmarks.
// Noise: stress literal/ null match vectors.
const SYNTH_RANDOM: &[u8] = include_bytes!("../../data/synth/random.lzfse");
// Random words matches: stress short match run/ long distance vectors.
const SYNTH_WORD04: &[u8] = include_bytes!("../../data/synth/word04.lzfse");
const SYNTH_WORD05: &[u8] = include_bytes!("../../data/synth/word05.lzfse");
const SYNTH_WORD06: &[u8] = include_bytes!("../../data/synth/word06.lzfse");
const SYNTH_WORD07: &[u8] = include_bytes!("../../data/synth/word07.lzfse");
const SYNTH_WORD08: &[u8] = include_bytes!("../../data/synth/word08.lzfse");
const SYNTH_WORD09: &[u8] = include_bytes!("../../data/synth/word09.lzfse");
const SYNTH_WORD10: &[u8] = include_bytes!("../../data/synth/word10.lzfse");
const SYNTH_WORD11: &[u8] = include_bytes!("../../data/synth/word11.lzfse");
const SYNTH_WORD12: &[u8] = include_bytes!("../../data/synth/word12.lzfse");
const SYNTH_WORD13: &[u8] = include_bytes!("../../data/synth/word13.lzfse");
const SYNTH_WORD14: &[u8] = include_bytes!("../../data/synth/word14.lzfse");
const SYNTH_WORD15: &[u8] = include_bytes!("../../data/synth/word15.lzfse");
const SYNTH_WORD16: &[u8] = include_bytes!("../../data/synth/word16.lzfse");
const SYNTH_WORD32: &[u8] = include_bytes!("../../data/synth/word32.lzfse");
const SYNTH_WORD64: &[u8] = include_bytes!("../../data/synth/word64.lzfse");
// Long fixed repeating sequences: stress long match/ short distance vectors.
const SYNTH_REPL01: &[u8] = include_bytes!("../../data/synth/repl01.lzfse");
const SYNTH_REPL02: &[u8] = include_bytes!("../../data/synth/repl02.lzfse");
const SYNTH_REPL03: &[u8] = include_bytes!("../../data/synth/repl03.lzfse");
const SYNTH_REPL04: &[u8] = include_bytes!("../../data/synth/repl04.lzfse");
const SYNTH_REPL05: &[u8] = include_bytes!("../../data/synth/repl05.lzfse");
const SYNTH_REPL06: &[u8] = include_bytes!("../../data/synth/repl06.lzfse");
const SYNTH_REPL07: &[u8] = include_bytes!("../../data/synth/repl07.lzfse");
const SYNTH_REPL08: &[u8] = include_bytes!("../../data/synth/repl08.lzfse");
const SYNTH_REPL09: &[u8] = include_bytes!("../../data/synth/repl09.lzfse");
const SYNTH_REPL10: &[u8] = include_bytes!("../../data/synth/repl10.lzfse");
const SYNTH_REPL11: &[u8] = include_bytes!("../../data/synth/repl11.lzfse");
const SYNTH_REPL12: &[u8] = include_bytes!("../../data/synth/repl12.lzfse");
const SYNTH_REPL13: &[u8] = include_bytes!("../../data/synth/repl13.lzfse");
const SYNTH_REPL14: &[u8] = include_bytes!("../../data/synth/repl14.lzfse");
const SYNTH_REPL15: &[u8] = include_bytes!("../../data/synth/repl15.lzfse");
const SYNTH_REPL16: &[u8] = include_bytes!("../../data/synth/repl16.lzfse");
const SYNTH_REPL32: &[u8] = include_bytes!("../../data/synth/repl32.lzfse");
const SYNTH_REPL64: &[u8] = include_bytes!("../../data/synth/repl64.lzfse");
// Short fixed repeating sequences: stress short match/ short distance vectors.
// Asymmetric encode/ decode.
const SYNTH_REPS04: &[u8] = include_bytes!("../../data/synth/reps04.lzfse");
const SYNTH_REPS05: &[u8] = include_bytes!("../../data/synth/reps05.lzfse");
const SYNTH_REPS06: &[u8] = include_bytes!("../../data/synth/reps06.lzfse");
const SYNTH_REPS07: &[u8] = include_bytes!("../../data/synth/reps07.lzfse");
const SYNTH_REPS08: &[u8] = include_bytes!("../../data/synth/reps08.lzfse");
const SYNTH_REPS09: &[u8] = include_bytes!("../../data/synth/reps09.lzfse");
const SYNTH_REPS10: &[u8] = include_bytes!("../../data/synth/reps10.lzfse");
const SYNTH_REPS11: &[u8] = include_bytes!("../../data/synth/reps11.lzfse");
const SYNTH_REPS12: &[u8] = include_bytes!("../../data/synth/reps12.lzfse");
const SYNTH_REPS13: &[u8] = include_bytes!("../../data/synth/reps13.lzfse");
const SYNTH_REPS14: &[u8] = include_bytes!("../../data/synth/reps14.lzfse");
const SYNTH_REPS15: &[u8] = include_bytes!("../../data/synth/reps15.lzfse");
const SYNTH_REPS16: &[u8] = include_bytes!("../../data/synth/reps16.lzfse");
const SYNTH_REPS32: &[u8] = include_bytes!("../../data/synth/reps32.lzfse");
const SYNTH_REPS64: &[u8] = include_bytes!("../../data/synth/reps64.lzfse");
const SYNTH_REPSIN: &[u8] = include_bytes!("../../data/synth/repsin.lzfse");
fn all(c: &mut Criterion) {
#[cfg(feature = "lzfse_ref")]
snappy(c, lzfse_ref_encode);
#[cfg(feature = "lzfse_ref")]
snappy(c, lzfse_ref_decode);
snappy(c, rust_encode);
snappy(c, rust_decode);
snappy(c, rust_ring_encode);
snappy(c, rust_ring_decode);
#[cfg(feature = "lzfse_ref")]
synth_random(c, lzfse_ref_encode);
#[cfg(feature = "lzfse_ref")]
synth_random(c, lzfse_ref_decode);
synth_random(c, rust_encode);
synth_random(c, rust_decode);
synth_random(c, rust_ring_encode);
synth_random(c, rust_ring_decode);
#[cfg(feature = "lzfse_ref")]
synth_word(c, lzfse_ref_encode);
#[cfg(feature = "lzfse_ref")]
synth_word(c, lzfse_ref_decode);
synth_word(c, rust_encode);
synth_word(c, rust_decode);
synth_word(c, rust_ring_encode);
synth_word(c, rust_ring_decode);
#[cfg(feature = "lzfse_ref")]
synth_repl(c, lzfse_ref_decode);
synth_repl(c, rust_decode);
synth_repl(c, rust_ring_decode);
}
/// Synthetic data
fn synth_random(c: &mut Criterion, mut engine: impl FnMut(&mut Criterion, &str, &[u8])) {
engine(c, "synth_random", SYNTH_RANDOM);
}
/// Synthetic data
fn synth_word(c: &mut Criterion, mut engine: impl FnMut(&mut Criterion, &str, &[u8])) {
engine(c, "synth_word04", SYNTH_WORD04);
engine(c, "synth_word05", SYNTH_WORD05);
engine(c, "synth_word06", SYNTH_WORD06);
engine(c, "synth_word07", SYNTH_WORD07);
engine(c, "synth_word08", SYNTH_WORD08);
engine(c, "synth_word09", SYNTH_WORD09);
engine(c, "synth_word10", SYNTH_WORD10);
engine(c, "synth_word11", SYNTH_WORD11);
engine(c, "synth_word12", SYNTH_WORD12);
engine(c, "synth_word13", SYNTH_WORD13);
engine(c, "synth_word14", SYNTH_WORD14);
engine(c, "synth_word15", SYNTH_WORD15);
engine(c, "synth_word16", SYNTH_WORD16);
engine(c, "synth_word32", SYNTH_WORD32);
engine(c, "synth_word64", SYNTH_WORD64);
}
/// Synthetic data
fn synth_repl(c: &mut Criterion, mut engine: impl FnMut(&mut Criterion, &str, &[u8])) {
engine(c, "synth_repl01", SYNTH_REPL01);
engine(c, "synth_repl02", SYNTH_REPL02);
engine(c, "synth_repl03", SYNTH_REPL03);
engine(c, "synth_repl04", SYNTH_REPL04);
engine(c, "synth_repl05", SYNTH_REPL05);
engine(c, "synth_repl06", SYNTH_REPL06);
engine(c, "synth_repl07", SYNTH_REPL07);
engine(c, "synth_repl08", SYNTH_REPL08);
engine(c, "synth_repl09", SYNTH_REPL09);
engine(c, "synth_repl10", SYNTH_REPL10);
engine(c, "synth_repl11", SYNTH_REPL11);
engine(c, "synth_repl12", SYNTH_REPL12);
engine(c, "synth_repl13", SYNTH_REPL13);
engine(c, "synth_repl14", SYNTH_REPL14);
engine(c, "synth_repl15", SYNTH_REPL15);
engine(c, "synth_repl16", SYNTH_REPL16);
engine(c, "synth_repl32", SYNTH_REPL32);
engine(c, "synth_repl64", SYNTH_REPL64);
engine(c, "synth_reps04", SYNTH_REPS04);
engine(c, "synth_reps05", SYNTH_REPS05);
engine(c, "synth_reps06", SYNTH_REPS06);
engine(c, "synth_reps07", SYNTH_REPS07);
engine(c, "synth_reps08", SYNTH_REPS08);
engine(c, "synth_reps09", SYNTH_REPS09);
engine(c, "synth_reps10", SYNTH_REPS10);
engine(c, "synth_reps11", SYNTH_REPS11);
engine(c, "synth_reps12", SYNTH_REPS12);
engine(c, "synth_reps13", SYNTH_REPS13);
engine(c, "synth_reps14", SYNTH_REPS14);
engine(c, "synth_reps15", SYNTH_REPS15);
engine(c, "synth_reps16", SYNTH_REPS16);
engine(c, "synth_reps32", SYNTH_REPS32);
engine(c, "synth_reps64", SYNTH_REPS64);
engine(c, "synth_repsin", SYNTH_REPSIN);
}
/// Snappy data
#[rustfmt::skip]
fn snappy(c: &mut Criterion, mut engine: impl FnMut(&mut Criterion, &str, &[u8])) {
engine(c, "snap_uflat00_html", CORPUS_HTML);
engine(c, "snap_uflat01_urls", CORPUS_URLS_10K);
engine(c, "snap_uflat02_jpg", CORPUS_FIREWORKS);
engine(c, "snap_uflat04_pdf", CORPUS_PAPER_100K);
engine(c, "snap_uflat05_html4", CORPUS_HTML_X_4);
engine(c, "snap_uflat06_txt1", CORPUS_ALICE29);
engine(c, "snap_uflat07_txt2", CORPUS_ASYOULIK);
engine(c, "snap_uflat08_txt3", CORPUS_LCET10);
engine(c, "snap_uflat09_txt4", CORPUS_PLRABN12);
engine(c, "snap_uflat10_pb", CORPUS_GEOPROTO);
engine(c, "snap_uflat11_gaviota", CORPUS_KPPKN);
}
fn rust_encode(c: &mut Criterion, tag: &str, enc: &[u8]) {
let mut encoder = LzfseEncoder::default();
encode(c, "rust", tag, enc, |src, dst| {
dst.clear();
encoder.encode_bytes(src, dst).expect("encode error");
})
}
fn rust_decode(c: &mut Criterion, tag: &str, enc: &[u8]) {
let mut decoder = LzfseDecoder::default();
decode(c, "rust", tag, enc, |src, dst| {
dst.clear();
decoder.decode_bytes(src, dst).expect("decode error");
})
}
fn rust_ring_encode(c: &mut Criterion, tag: &str, enc: &[u8]) { | encoder.encode(&mut src, dst).expect("encode error");
})
}
fn rust_ring_decode(c: &mut Criterion, tag: &str, enc: &[u8]) {
let mut decoder = LzfseRingDecoder::default();
decode(c, "rust_ring", tag, enc, |mut src, dst| {
dst.clear();
decoder.decode(&mut src, dst).expect("decode error");
})
}
#[cfg(feature = "lzfse_ref")]
fn lzfse_ref_encode(c: &mut Criterion, tag: &str, enc: &[u8]) {
encode(c, "lzfse_ref", tag, enc, |src, dst| {
assert_ne!(lzfse_sys::encode(src, dst.as_mut_slice()), 0);
})
}
#[cfg(feature = "lzfse_ref")]
fn lzfse_ref_decode(c: &mut Criterion, tag: &str, enc: &[u8]) {
decode(c, "lzfse_ref", tag, enc, |src, dst| {
assert_ne!(lzfse_sys::decode(src, dst.as_mut_slice()), 0);
})
}
fn encode(
c: &mut Criterion,
engine: &str,
tag: &str,
enc: &[u8],
f: impl FnMut(&[u8], &mut Vec<u8>),
) {
let dec = decode_bytes(enc);
let len = dec.len();
let mut enc = vec![0u8; enc.len() + 4096];
let mut bench_name: String = "encode/".to_owned();
bench_name.push_str(tag);
execute(c, engine, &bench_name, &dec, &mut enc, len, f);
}
fn decode(
c: &mut Criterion,
engine: &str,
tag: &str,
enc: &[u8],
f: impl FnMut(&[u8], &mut Vec<u8>),
) {
let mut dec = decode_bytes(enc);
let len = dec.len();
let mut bench_name: String = "decode/".to_owned();
bench_name.push_str(tag);
execute(c, engine, &bench_name, enc, &mut dec, len, f);
}
fn execute(
c: &mut Criterion,
engine: &str,
bench_name: &str,
src: &[u8],
dst: &mut Vec<u8>,
len: usize,
mut f: impl FnMut(&[u8], &mut Vec<u8>),
) {
let mut group = c.benchmark_group(engine);
group.measurement_time(MEASUREMENT_TIME);
group.sample_size(SAMPLE_SIZE);
group.sampling_mode(SamplingMode::Flat);
group.throughput(Throughput::Bytes(len as u64));
group.bench_function(bench_name, |b| b.iter(|| f(black_box(src), black_box(dst))));
group.finish();
}
fn decode_bytes(enc: &[u8]) -> Vec<u8> {
let mut dec = Vec::default();
lzfse_rust::decode_bytes(enc, &mut dec).expect("decode error");
dec
}
criterion_group!(benches, all);
criterion_main!(benches); | let mut encoder = LzfseRingEncoder::default();
encode(c, "rust_ring", tag, enc, |mut src, dst| {
dst.clear(); |
stats.js | // get all workout data from back-end
fetch("/api/workouts/range")
.then(response => {
return response.json();
})
.then(data => {
populateChart(data);
});
API.getWorkoutsInRange()
function | () {
const arr = [
"#003f5c",
"#2f4b7c",
"#665191",
"#a05195",
"#d45087",
"#f95d6a",
"#ff7c43",
"ffa600",
"#003f5c",
"#2f4b7c",
"#665191",
"#a05195",
"#d45087",
"#f95d6a",
"#ff7c43",
"ffa600"
];
return arr;
}
function populateChart(data) {
let durations = duration(data);
let totalDurations = totalDuration(data);
let pounds = calculateWeight(data);
let totalPounds = calculateTotalWeight(data);
let workouts = workoutNames(data);
let days = workoutDays(data);
const colors = generatePalette();
document.querySelector("h2").textContent = `(${days.all[0]} to ${days.all[days.all.length - 1]})`;
let line = document.querySelector("#canvas").getContext("2d");
let bar = document.querySelector("#canvas2").getContext("2d");
let pie = document.querySelector("#canvas3").getContext("2d");
let pie2 = document.querySelector("#canvas4").getContext("2d");
let lineChart = new Chart(line, {
type: "line",
data: {
labels: days.all,
datasets: [
{
label: "Workout Duration In Minutes",
backgroundColor: "red",
borderColor: "red",
data: totalDurations,
fill: false
}
]
},
options: {
responsive: true,
title: {
display: true,
text: "Workout Durations"
},
scales: {
xAxes: [
{
display: true,
scaleLabel: {
display: true
}
}
],
yAxes: [
{
display: true,
scaleLabel: {
display: true
}
}
]
}
}
});
let barChart = new Chart(bar, {
type: "bar",
data: {
labels: days.resistance,
datasets: [
{
label: "Pounds",
data: totalPounds,
backgroundColor: [
"rgba(255, 99, 132, 0.2)",
"rgba(54, 162, 235, 0.2)",
"rgba(255, 206, 86, 0.2)",
"rgba(75, 192, 192, 0.2)",
"rgba(153, 102, 255, 0.2)",
"rgba(255, 159, 64, 0.2)",
"rgba(255, 99, 132, 0.2)",
"rgba(54, 162, 235, 0.2)",
"rgba(255, 206, 86, 0.2)",
"rgba(75, 192, 192, 0.2)"
],
borderColor: [
"rgba(255, 99, 132, 1)",
"rgba(54, 162, 235, 1)",
"rgba(255, 206, 86, 1)",
"rgba(75, 192, 192, 1)",
"rgba(153, 102, 255, 1)",
"rgba(255, 159, 64, 1)",
"rgba(255, 99, 132, 1)",
"rgba(54, 162, 235, 1)",
"rgba(255, 206, 86, 1)",
"rgba(75, 192, 192, 1)"
],
borderWidth: 1
}
]
},
options: {
title: {
display: true,
text: "Resistance Workout Weight Lifted"
},
scales: {
yAxes: [
{
ticks: {
beginAtZero: true
}
}
]
}
}
});
let pieChart = new Chart(pie, {
type: "pie",
data: {
labels: workouts.all,
datasets: [
{
label: "Exercises Performed",
backgroundColor: colors,
data: durations
}
]
},
options: {
title: {
display: true,
text: "Duration in Minutes of Each Exercise Relative to Total Workout Time"
}
}
});
let donutChart = new Chart(pie2, {
type: "doughnut",
data: {
labels: workouts.resistance,
datasets: [
{
label: "Exercises Performed",
backgroundColor: colors,
data: pounds
}
]
},
options: {
title: {
display: true,
text: "Weight in Pounds of Each Resistance Exercise Relative to Total Weight Lifted"
}
}
});
}
function duration(data) {
let durations = [];
data.forEach(workout => {
workout.exercises.forEach(exercise => {
durations.push(exercise.duration);
});
});
return durations;
}
function totalDuration(data) {
let totalDurations = [];
data.forEach(workout => {
totalDurations.push(workout.totalDuration);
});
return totalDurations;
}
function calculateWeight(data) {
let pounds = [];
data.forEach(workout => {
workout.exercises.forEach(exercise => {
if (exercise.type === "resistance") {
pounds.push(exercise.weight);
}
});
});
return pounds;
}
function calculateTotalWeight(data) {
let totalPounds = [];
data.forEach(workout => {
let workoutPounds = 0;
workout.exercises.forEach(exercise => {
if (exercise.type === "resistance") {
workoutPounds += exercise.weight;
}
});
if (workoutPounds !== 0) {
totalPounds.push(workoutPounds);
}
});
return totalPounds;
}
function workoutNames(data) {
let workouts = {
all: [],
resistance: []
};
data.forEach(workout => {
workout.exercises.forEach(exercise => {
workouts.all.push(exercise.name);
if (exercise.type === "resistance") {
workouts.resistance.push(exercise.name);
}
});
});
return workouts;
}
function workoutDays(data) {
let days = {
all: [],
resistance: []
};
data.forEach(workout => {
days.all.push(moment(workout.day).format("ddd M/D, h:mm A"));
let exercises = workout.exercises;
for (let i = 0; i < exercises.length; i++) {
if (exercises[i].type === "resistance") {
days.resistance.push(moment(workout.day).format("ddd M/D, h:mm A"));
break;
}
}
});
return days;
} | generatePalette |
test_utils.rs | use portpicker::pick_unused_port;
use std::future::Future;
use std::pin::Pin;
/// An owned dynamically typed [`Future`] for use in cases where you can't
/// statically type your result or need to add some indirection.
#[allow(dead_code)]
pub type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;
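// Illustrative use of the alias above (a sketch; `port_task` is an
// assumption, not part of this module):
//
// fn port_task() -> BoxFuture<'static, u16> {
//     Box::pin(async { find_port().await })
// }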
/// Find an unused port.
#[allow(dead_code)]
pub async fn | () -> u16 {
pick_unused_port().expect("No ports free")
}
| find_port |
transition_test.go | package transition_test
import (
"errors"
"testing"
"github.com/conku/gorm"
_ "github.com/mattn/go-sqlite3"
"github.com/conku/qor/test/utils"
"github.com/conku/transition"
)
type Order struct {
Id int
Address string
transition.Transition
}
var db = utils.TestDB()
func init() {
for _, model := range []interface{}{&Order{}, &transition.StateChangeLog{}} {
if err := db.DropTableIfExists(model).Error; err != nil {
panic(err)
}
if err := db.AutoMigrate(model).Error; err != nil {
panic(err)
}
}
}
func getStateMachine() *transition.StateMachine {
var orderStateMachine = transition.New(&Order{})
orderStateMachine.Initial("draft")
orderStateMachine.State("checkout")
orderStateMachine.State("paid")
orderStateMachine.State("processed")
orderStateMachine.State("delivered")
orderStateMachine.State("cancelled")
orderStateMachine.State("paid_cancelled")
orderStateMachine.Event("checkout").To("checkout").From("draft")
orderStateMachine.Event("pay").To("paid").From("checkout")
return orderStateMachine
}
func CreateOrderAndExecuteTransition(transition *transition.StateMachine, event string, order *Order) error {
if err := db.Save(order).Error; err != nil {
return err
}
if err := transition.Trigger(event, order, db); err != nil {
return err
}
return nil
}
func TestStateTransition(t *testing.T) {
order := &Order{}
if err := getStateMachine().Trigger("checkout", order, db); err != nil {
t.Errorf("should not raise any error when trigger event checkout")
}
if order.GetState() != "checkout" {
t.Errorf("state doesn't changed to checkout")
}
var stateChangeLogs = transition.GetStateChangeLogs(order, db)
if len(stateChangeLogs) != 1 {
t.Errorf("should get one state change log with GetStateChangeLogs")
} else {
var stateChangeLog = stateChangeLogs[0]
if stateChangeLog.From != "draft" {
t.Errorf("state from not set")
}
if stateChangeLog.To != "checkout" {
t.Errorf("state to not set")
}
}
}
func TestGetLastStateChange(t *testing.T) {
order := &Order{}
if err := getStateMachine().Trigger("checkout", order, db, "checkout note"); err != nil {
t.Errorf("should not raise any error when trigger event checkout")
}
if err := getStateMachine().Trigger("pay", order, db, "pay note"); err != nil {
t.Errorf("should not raise any error when trigger event checkout")
}
if order.GetState() != "paid" {
t.Errorf("state doesn't changed to paid")
}
var lastStateChange = transition.GetLastStateChange(order, db)
if lastStateChange.To != "paid" {
t.Errorf("state to not set")
} else {
if lastStateChange.From != "checkout" {
t.Errorf("state from not set")
}
if lastStateChange.Note != "pay note" {
t.Errorf("state note not set")
}
}
}
func TestMultipleTransitionWithOneEvent(t *testing.T) {
orderStateMachine := getStateMachine()
cancellEvent := orderStateMachine.Event("cancel")
cancellEvent.To("cancelled").From("draft", "checkout")
cancellEvent.To("paid_cancelled").From("paid", "processed")
unpaidOrder1 := &Order{}
if err := orderStateMachine.Trigger("cancel", unpaidOrder1, db); err != nil {
t.Errorf("should not raise any error when trigger event cancel")
}
if unpaidOrder1.State != "cancelled" {
t.Errorf("order status doesn't transitioned correctly")
}
unpaidOrder2 := &Order{}
unpaidOrder2.State = "draft"
if err := orderStateMachine.Trigger("cancel", unpaidOrder2, db); err != nil {
t.Errorf("should not raise any error when trigger event cancel")
}
if unpaidOrder2.State != "cancelled" {
t.Errorf("order status doesn't transitioned correctly")
}
paidOrder := &Order{}
paidOrder.State = "paid"
if err := orderStateMachine.Trigger("cancel", paidOrder, db); err != nil {
t.Errorf("should not raise any error when trigger event cancel")
}
if paidOrder.State != "paid_cancelled" {
t.Errorf("order status doesn't transitioned correctly")
}
}
func TestStateCallbacks(t *testing.T) {
orderStateMachine := getStateMachine()
order := &Order{}
address1 := "I'm an address should be set when enter checkout"
address2 := "I'm an address should be set when exit checkout"
orderStateMachine.State("checkout").Enter(func(order interface{}, tx *gorm.DB) error {
order.(*Order).Address = address1
return nil
}).Exit(func(order interface{}, tx *gorm.DB) error {
order.(*Order).Address = address2
return nil
})
if err := orderStateMachine.Trigger("checkout", order, db); err != nil {
t.Errorf("should not raise any error when trigger event checkout")
}
if order.Address != address1 {
t.Errorf("enter callback not triggered")
}
if err := orderStateMachine.Trigger("pay", order, db); err != nil {
t.Errorf("should not raise any error when trigger event pay")
}
if order.Address != address2 {
t.Errorf("exit callback not triggered")
}
}
func TestEventCallbacks(t *testing.T) {
var (
order = &Order{}
orderStateMachine = getStateMachine()
prevState, afterState string
)
orderStateMachine.Event("checkout").To("checkout").From("draft").Before(func(order interface{}, tx *gorm.DB) error {
prevState = order.(*Order).State
return nil
}).After(func(order interface{}, tx *gorm.DB) error {
afterState = order.(*Order).State
return nil
})
order.State = "draft"
if err := orderStateMachine.Trigger("checkout", order, nil); err != nil {
t.Errorf("should not raise any error when trigger event checkout")
}
if prevState != "draft" {
t.Errorf("Before callback triggered after state change")
}
if afterState != "checkout" {
t.Errorf("After callback triggered after state change")
}
}
func TestTransitionOnEnterCallbackError(t *testing.T) {
var (
order = &Order{}
orderStateMachine = getStateMachine()
)
orderStateMachine.State("checkout").Enter(func(order interface{}, tx *gorm.DB) (err error) {
return errors.New("intentional error")
})
if err := orderStateMachine.Trigger("checkout", order, nil); err == nil {
t.Errorf("should raise an intentional error")
}
if order.State != "draft" {
t.Errorf("state transitioned on Enter callback error")
}
}
func | (t *testing.T) {
var (
order = &Order{}
orderStateMachine = getStateMachine()
)
orderStateMachine.State("checkout").Exit(func(order interface{}, tx *gorm.DB) (err error) {
return errors.New("intentional error")
})
if err := orderStateMachine.Trigger("checkout", order, nil); err != nil {
t.Errorf("should not raise error when checkout")
}
if err := orderStateMachine.Trigger("pay", order, nil); err == nil {
t.Errorf("should raise an intentional error")
}
if order.State != "checkout" {
t.Errorf("state transitioned on Enter callback error")
}
}
func TestEventOnBeforeCallbackError(t *testing.T) {
var (
order = &Order{}
orderStateMachine = getStateMachine()
)
orderStateMachine.Event("checkout").To("checkout").From("draft").Before(func(order interface{}, tx *gorm.DB) error {
return errors.New("intentional error")
})
if err := orderStateMachine.Trigger("checkout", order, nil); err == nil {
t.Errorf("should raise an intentional error")
}
if order.State != "draft" {
t.Errorf("state transitioned on Enter callback error")
}
}
func TestEventOnAfterCallbackError(t *testing.T) {
var (
order = &Order{}
orderStateMachine = getStateMachine()
)
orderStateMachine.Event("checkout").To("checkout").From("draft").After(func(order interface{}, tx *gorm.DB) error {
return errors.New("intentional error")
})
if err := orderStateMachine.Trigger("checkout", order, nil); err == nil {
t.Errorf("should raise an intentional error")
}
if order.State != "draft" {
t.Errorf("state transitioned on Enter callback error")
}
}
| TestTransitionOnExitCallbackError |
mediaGalleryFragment.ts | import { gql } from '@apollo/client';
export interface MediaGalleryItemType {
disabled: boolean;
label: string;
position: number;
url: string;
}
export const MEDIA_GALLERY_FRAGMENT = gql`
fragment MediaGallery on ProductInterface {
mediaGallery: media_gallery {
disabled
label
position
url | }
}
`; |
|
middleware_tile_cache.go | package server
import (
"bytes"
"fmt"
"io"
"net/http"
"github.com/go-spatial/tegola/atlas"
"github.com/go-spatial/tegola/cache"
"github.com/go-spatial/tegola/internal/log"
"github.com/go-spatial/tegola/mvt"
)
// TileCacheHandler implements a request cache for tiles on requests when the URLs
// have a /:z/:x/:y scheme suffix (i.e. /osm/1/3/4.pbf)
func TileCacheHandler(a *atlas.Atlas, next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var err error
// check if a cache backend exists
cacher := a.GetCache()
if cacher == nil {
// nope. move on
next.ServeHTTP(w, r)
return
}
// parse our URI into a cache key structure (pop off the "maps/" prefix)
// 5 is the value of len("maps/")
key, err := cache.ParseKey(r.URL.Path[5:])
if err != nil {
log.Errorf("cache middleware: ParseKey err: %v", err)
next.ServeHTTP(w, r)
return
}
// use the URL path as the key
cachedTile, hit, err := cacher.Get(key)
if err != nil |
// cache miss
if !hit {
// buffer which will hold a copy of the response for writing to the cache
var buff bytes.Buffer
// overwrite our current responseWriter with a tileCacheResponseWriter
w = newTileCacheResponseWriter(w, &buff)
next.ServeHTTP(w, r)
// check if our request context has been canceled
if r.Context().Err() != nil {
return
}
// if nothing has been written to the buffer, don't write to the cache
if buff.Len() == 0 {
return
}
if err := cacher.Set(key, buff.Bytes()); err != nil {
log.Warnf("cache response writer err: %v", err)
}
return
}
// cors header
w.Header().Set("Access-Control-Allow-Origin", CORSAllowedOrigin)
// mimetype for mapbox vector tiles
w.Header().Add("Content-Type", mvt.MimeType)
// communicate the cache is being used
w.Header().Add("Tegola-Cache", "HIT")
w.Header().Add("Content-Length", fmt.Sprintf("%d", len(cachedTile)))
w.Write(cachedTile)
return
})
}
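// Illustrative wiring of the handler above (a sketch, not part of this
// package; how the *atlas.Atlas and the inner tile handler are obtained
// is elided):
//
// var a *atlas.Atlas      // configured elsewhere
// var tiles http.Handler  // the real tile-rendering handler
// http.Handle("/maps/", TileCacheHandler(a, tiles))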
func newTileCacheResponseWriter(resp http.ResponseWriter, w io.Writer) http.ResponseWriter {
return &tileCacheResponseWriter{
resp: resp,
multi: io.MultiWriter(w, resp),
}
}
// tileCacheResponseWriter wraps http.ResponseWriter (https://golang.org/pkg/net/http/#ResponseWriter)
// to additionally write the response to a cache when there is a cache MISS
type tileCacheResponseWriter struct {
// status response code
status int
resp http.ResponseWriter
multi io.Writer
}
func (w *tileCacheResponseWriter) Header() http.Header {
// communicate the cache is being used
w.resp.Header().Set("Tegola-Cache", "MISS")
return w.resp.Header()
}
func (w *tileCacheResponseWriter) Write(b []byte) (int, error) {
// only write to the multi writer when http response == StatusOK
if w.status == http.StatusOK {
// write to our multi writer
return w.multi.Write(b)
}
// write to the original response writer
return w.resp.Write(b)
}
func (w *tileCacheResponseWriter) WriteHeader(i int) {
w.status = i
w.resp.WriteHeader(i)
}
| {
log.Errorf("cache middleware: error reading from cache: %v", err)
next.ServeHTTP(w, r)
return
} |
offset_date_time.rs | use std::cmp::Ordering;
#[cfg(feature = "std")]
use std::time::SystemTime;
use time::{error, prelude::*, Format, OffsetDateTime, UtcOffset, Weekday};
#[test]
#[cfg(feature = "std")]
fn now_utc() {
assert!(OffsetDateTime::now_utc().year() >= 2019);
assert_eq!(OffsetDateTime::now_utc().offset(), offset!(UTC));
}
#[test]
#[cfg(feature = "std")]
fn now_local() {
assert!(OffsetDateTime::now_local().year() >= 2019);
assert_eq!(
OffsetDateTime::now_local().offset(),
UtcOffset::current_local_offset()
);
}
#[test]
#[cfg(feature = "std")]
fn try_now_local() {
assert!(OffsetDateTime::try_now_local().is_ok());
}
#[test]
fn to_offset() {
assert_eq!(
date!(2000 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1))
.year(),
1999,
);
let sydney = date!(2000 - 01 - 01).midnight().assume_offset(offset!(+11));
let new_york = sydney.to_offset(offset!(-5));
let los_angeles = sydney.to_offset(offset!(-8));
assert_eq!(sydney.hour(), 0);
assert_eq!(sydney.day(), 1);
assert_eq!(new_york.hour(), 8);
assert_eq!(new_york.day(), 31);
assert_eq!(los_angeles.hour(), 5);
assert_eq!(los_angeles.day(), 31);
}
#[test]
fn unix_epoch() {
assert_eq!(
OffsetDateTime::unix_epoch(),
date!(1970 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn from_unix_timestamp() {
assert_eq!(
OffsetDateTime::from_unix_timestamp(0),
OffsetDateTime::unix_epoch(),
);
assert_eq!(
OffsetDateTime::from_unix_timestamp(1_546_300_800),
date!(2019 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn from_unix_timestamp_nanos() {
assert_eq!(
OffsetDateTime::from_unix_timestamp_nanos(0),
OffsetDateTime::unix_epoch(),
);
assert_eq!(
OffsetDateTime::from_unix_timestamp_nanos(1_546_300_800_000_000_000),
date!(2019 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn offset() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().offset(),
offset!(UTC),
);
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_offset(offset!(+1))
.offset(),
offset!(+1),
);
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(+1))
.offset(),
offset!(+1),
);
}
#[test]
fn timestamp() {
assert_eq!(OffsetDateTime::unix_epoch().timestamp(), 0);
assert_eq!(
OffsetDateTime::unix_epoch()
.to_offset(offset!(+1))
.timestamp(),
0,
);
assert_eq!(
date!(1970 - 01 - 01)
.midnight()
.assume_offset(offset!(-1))
.timestamp(),
3_600,
);
}
#[test]
fn timestamp_nanos() {
assert_eq!(
date!(1970 - 01 - 01)
.midnight()
.assume_utc()
.timestamp_nanos(),
0,
);
assert_eq!(
date!(1970 - 01 - 01)
.with_time(time!(1:00))
.assume_utc()
.to_offset(offset!(-1))
.timestamp_nanos(),
3_600_000_000_000,
);
}
#[test]
fn date() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().date(),
date!(2019 - 01 - 01),
);
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1))
.date(),
date!(2018 - 12 - 31),
);
}
#[test]
fn time() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().time(),
time!(0:00),
);
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1))
.time(),
time!(23:00),
);
}
#[test]
fn year() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().year(), 2019);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.year(),
2020,
);
assert_eq!(date!(2020 - 01 - 01).midnight().assume_utc().year(), 2020);
}
#[test]
fn month() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().month(), 1);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.month(),
1,
);
}
#[test]
fn day() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().day(), 1);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.day(),
1,
);
}
#[test]
fn month_day() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().month_day(),
(1, 1),
);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.month_day(),
(1, 1),
);
}
#[test]
fn ordinal() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().ordinal(), 1);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.ordinal(),
1,
);
}
#[test]
fn iso_year_week() {
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.iso_year_week(),
(2019, 1)
);
assert_eq!(
date!(2019 - 10 - 04)
.midnight()
.assume_utc()
.iso_year_week(),
(2019, 40)
);
assert_eq!(
date!(2020 - 01 - 01)
.midnight()
.assume_utc()
.iso_year_week(),
(2020, 1)
);
assert_eq!(
date!(2020 - 12 - 31)
.midnight()
.assume_utc()
.iso_year_week(),
(2020, 53)
);
assert_eq!(
date!(2021 - 01 - 01)
.midnight()
.assume_utc()
.iso_year_week(),
(2020, 53)
);
}
#[test]
fn week() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().week(), 1);
assert_eq!(date!(2020 - 01 - 01).midnight().assume_utc().week(), 1);
assert_eq!(date!(2020 - 12 - 31).midnight().assume_utc().week(), 53);
assert_eq!(date!(2021 - 01 - 01).midnight().assume_utc().week(), 53);
}
#[test]
fn weekday() {
use Weekday::*;
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().weekday(),
Tuesday
);
assert_eq!(
date!(2019 - 02 - 01).midnight().assume_utc().weekday(),
Friday
);
assert_eq!(
date!(2019 - 03 - 01).midnight().assume_utc().weekday(),
Friday
);
}
#[test]
fn hour() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().hour(), 0);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59))
.assume_utc()
.to_offset(offset!(-2))
.hour(),
21,
);
}
#[test]
fn minute() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().minute(), 0);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59))
.assume_utc()
.to_offset(offset!(+0:30))
.minute(),
29,
);
}
#[test]
fn second() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().second(), 0);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59))
.assume_utc()
.to_offset(offset!(+0:00:30))
.second(),
29,
);
}
#[test]
fn millisecond() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().millisecond(),
0
);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59.999))
.assume_utc()
.millisecond(),
999,
);
}
#[test]
fn microsecond() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().microsecond(),
0
);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59.999_999))
.assume_utc()
.microsecond(),
999_999,
);
}
#[test]
fn nanosecond() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().nanosecond(),
0
);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59.999_999_999))
.assume_utc()
.nanosecond(),
999_999_999,
);
}
#[test]
fn format() {
assert_eq!(
date!(2019 - 01 - 02)
.midnight()
.assume_utc()
.format("%F %r %z"),
"2019-01-02 12:00:00 am +0000",
);
assert_eq!(
date!(2019 - 01 - 02)
.with_time(time!(3:04:05.678_901_234))
.assume_offset(offset!(+6:07))
.format(Format::Rfc3339),
"2019-01-02T03:04:05+06:07"
);
}
#[test]
fn parse() {
assert_eq!(
OffsetDateTime::parse("2019-01-02 00:00:00 +0000", "%F %T %z"),
Ok(date!(2019 - 01 - 02).midnight().assume_utc()),
);
assert_eq!(
OffsetDateTime::parse("2019-002 23:59:59 +0000", "%Y-%j %T %z"),
Ok(date!(2019 - 002).with_time(time!(23:59:59)).assume_utc())
);
assert_eq!(
OffsetDateTime::parse("2019-W01-3 12:00:00 pm +0000", "%G-W%V-%u %r %z"),
Ok(date!(2019 - 002).with_time(time!(12:00)).assume_utc())
);
assert_eq!(
OffsetDateTime::parse("2019-01-02 03:04:05 +0600", "%F %T %z"),
Ok(date!(2019 - 01 - 02)
.with_time(time!(3:04:05))
.assume_offset(offset!(+6)))
);
assert_eq!(
OffsetDateTime::parse("2020-09-08T08:44:31+02:30", Format::Rfc3339),
Ok(date!(2020 - 09 - 08)
.with_time(time!(08:44:31))
.assume_offset(offset!(+02:30)))
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05.678901234+05:06", Format::Rfc3339),
Ok(date!(2019 - 01 - 02)
.with_time(time!(3:04:05.678_901_234))
.assume_offset(offset!(+5:06)))
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05.678901234Z", Format::Rfc3339),
Ok(date!(2019 - 01 - 02)
.with_time(time!(3:04:05.678_901_234))
.assume_utc())
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05/", Format::Rfc3339),
Err(error::Parse::UnexpectedCharacter {
actual: '/',
expected: '+'
})
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05", Format::Rfc3339),
Err(error::Parse::UnexpectedEndOfString)
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05.", Format::Rfc3339),
Err(error::Parse::InvalidNanosecond)
);
}
#[test]
fn partial_eq() {
assert_eq!(
date!(2000 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1)),
date!(2000 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn partial_ord() {
let t1 = date!(2019 - 01 - 01).midnight().assume_utc();
let t2 = date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1));
assert_eq!(t1.partial_cmp(&t2), Some(Ordering::Equal));
}
#[test]
fn ord() {
let t1 = date!(2019 - 01 - 01).midnight().assume_utc();
let t2 = date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1));
assert_eq!(t1, t2);
let t1 = date!(2019 - 01 - 01).midnight().assume_utc();
let t2 = date!(2019 - 01 - 01)
.with_time(time!(0:00:00.000_000_001))
.assume_utc();
assert!(t2 > t1);
}
#[test]
#[cfg(feature = "std")]
fn hash() {
use std::{
collections::hash_map::DefaultHasher,
hash::{Hash, Hasher},
};
assert_eq!(
{
let mut hasher = DefaultHasher::new();
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.hash(&mut hasher);
hasher.finish()
},
{
let mut hasher = DefaultHasher::new();
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1))
.hash(&mut hasher);
hasher.finish()
}
);
// Ensure that a `PrimitiveDateTime` and `OffsetDateTime` don't collide,
// even if the UTC time is the same.
assert_ne!(
{
let mut hasher = DefaultHasher::new();
date!(2019 - 01 - 01).midnight().hash(&mut hasher);
hasher.finish()
},
{
let mut hasher = DefaultHasher::new();
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.hash(&mut hasher);
hasher.finish()
}
);
}
#[test]
fn add_duration() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc() + 5.days(),
date!(2019 - 01 - 06).midnight().assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31).midnight().assume_utc() + 1.days(),
date!(2020 - 01 - 01).midnight().assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
+ 2.seconds(),
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc() + (-2).seconds(),
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
assert_eq!(
date!(1999 - 12 - 31).with_time(time!(23:00)).assume_utc() + 1.hours(),
date!(2000 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn add_std_duration() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc() + 5.std_days(),
date!(2019 - 01 - 06).midnight().assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31).midnight().assume_utc() + 1.std_days(),
date!(2020 - 01 - 01).midnight().assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
+ 2.std_seconds(),
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc(),
);
}
#[test]
fn add_assign_duration() {
let mut ny19 = date!(2019 - 01 - 01).midnight().assume_utc();
ny19 += 5.days();
assert_eq!(ny19, date!(2019 - 01 - 06).midnight().assume_utc());
let mut nye20 = date!(2019 - 12 - 31).midnight().assume_utc();
nye20 += 1.days();
assert_eq!(nye20, date!(2020 - 01 - 01).midnight().assume_utc());
let mut nye20t = date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc();
nye20t += 2.seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
let mut ny20t = date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc();
ny20t += (-2).seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
}
#[test]
fn add_assign_std_duration() {
let mut ny19 = date!(2019 - 01 - 01).midnight().assume_utc();
ny19 += 5.std_days();
assert_eq!(ny19, date!(2019 - 01 - 06).midnight().assume_utc());
let mut nye20 = date!(2019 - 12 - 31).midnight().assume_utc();
nye20 += 1.std_days();
assert_eq!(nye20, date!(2020 - 01 - 01).midnight().assume_utc());
let mut nye20t = date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc();
nye20t += 2.std_seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
}
#[test]
fn sub_duration() {
assert_eq!(
date!(2019 - 01 - 06).midnight().assume_utc() - 5.days(),
date!(2019 - 01 - 01).midnight().assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).midnight().assume_utc() - 1.days(),
date!(2019 - 12 - 31).midnight().assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc() - 2.seconds(),
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
- (-2).seconds(),
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc(),
);
assert_eq!(
date!(1999 - 12 - 31).with_time(time!(23:00)).assume_utc() - (-1).hours(),
date!(2000 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn sub_std_duration() {
assert_eq!(
date!(2019 - 01 - 06).midnight().assume_utc() - 5.std_days(),
date!(2019 - 01 - 01).midnight().assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).midnight().assume_utc() - 1.std_days(),
date!(2019 - 12 - 31).midnight().assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc() - 2.std_seconds(),
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
}
#[test]
fn sub_assign_duration() {
let mut ny19 = date!(2019 - 01 - 06).midnight().assume_utc();
ny19 -= 5.days();
assert_eq!(ny19, date!(2019 - 01 - 01).midnight().assume_utc());
let mut ny20 = date!(2020 - 01 - 01).midnight().assume_utc();
ny20 -= 1.days();
assert_eq!(ny20, date!(2019 - 12 - 31).midnight().assume_utc());
let mut ny20t = date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc();
ny20t -= 2.seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
let mut nye20t = date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc();
nye20t -= (-2).seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
}
#[test]
fn sub_assign_std_duration() {
let mut ny19 = date!(2019 - 01 - 06).midnight().assume_utc();
ny19 -= 5.std_days();
assert_eq!(ny19, date!(2019 - 01 - 01).midnight().assume_utc());
let mut ny20 = date!(2020 - 01 - 01).midnight().assume_utc();
ny20 -= 1.std_days();
assert_eq!(ny20, date!(2019 - 12 - 31).midnight().assume_utc());
let mut ny20t = date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc();
ny20t -= 2.std_seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
}
#[test]
#[cfg(feature = "std")]
fn std_add_duration() {
assert_eq!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()) + 0.seconds(),
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()) + 5.days(),
SystemTime::from(date!(2019 - 01 - 06).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc()) + 1.days(),
SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
) + 2.seconds(),
SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc())
+ (-2).seconds(),
SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
),
);
}
#[test]
#[cfg(feature = "std")]
fn std_add_assign_duration() {
let mut ny19 = SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc());
ny19 += 5.days();
assert_eq!(ny19, date!(2019 - 01 - 06).midnight().assume_utc());
let mut nye20 = SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc());
nye20 += 1.days();
assert_eq!(nye20, date!(2020 - 01 - 01).midnight().assume_utc());
let mut nye20t = SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
nye20t += 2.seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
let mut ny20t = SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc());
ny20t += (-2).seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
}
#[test]
#[cfg(feature = "std")]
fn std_sub_duration() {
assert_eq!(
SystemTime::from(date!(2019 - 01 - 06).midnight().assume_utc()) - 5.days(),
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc()) - 1.days(),
SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc())
- 2.seconds(),
SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
),
);
assert_eq!(
SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
) - (-2).seconds(),
SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()),
);
}
#[test]
#[cfg(feature = "std")]
fn std_sub_assign_duration() {
let mut ny19 = SystemTime::from(date!(2019 - 01 - 06).midnight().assume_utc());
ny19 -= 5.days();
assert_eq!(ny19, date!(2019 - 01 - 01).midnight().assume_utc());
let mut ny20 = SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc());
ny20 -= 1.days();
assert_eq!(ny20, date!(2019 - 12 - 31).midnight().assume_utc());
let mut ny20t = SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc());
ny20t -= 2.seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
let mut nye20t = SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
nye20t -= (-2).seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
}
#[test]
fn sub_self() |
#[test]
#[cfg(feature = "std")]
fn std_sub() {
assert_eq!(
SystemTime::from(date!(2019 - 01 - 02).midnight().assume_utc())
- date!(2019 - 01 - 01).midnight().assume_utc(),
1.days()
);
assert_eq!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
- date!(2019 - 01 - 02).midnight().assume_utc(),
(-1).days()
);
assert_eq!(
SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc())
- date!(2019 - 12 - 31).midnight().assume_utc(),
1.days()
);
assert_eq!(
SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc())
- date!(2020 - 01 - 01).midnight().assume_utc(),
(-1).days()
);
}
#[test]
#[cfg(feature = "std")]
fn sub_std() {
assert_eq!(
date!(2019 - 01 - 02).midnight().assume_utc()
- SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()),
1.days()
);
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc()
- SystemTime::from(date!(2019 - 01 - 02).midnight().assume_utc()),
(-1).days()
);
assert_eq!(
date!(2020 - 01 - 01).midnight().assume_utc()
- SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc()),
1.days()
);
assert_eq!(
date!(2019 - 12 - 31).midnight().assume_utc()
- SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc()),
(-1).days()
);
}
#[test]
#[cfg(feature = "std")]
#[allow(deprecated)]
fn eq_std() {
let now_datetime = OffsetDateTime::now();
let now_systemtime = SystemTime::from(now_datetime);
assert_eq!(now_datetime, now_systemtime);
}
#[test]
#[cfg(feature = "std")]
#[allow(deprecated)]
fn std_eq() {
let now_datetime = OffsetDateTime::now();
let now_systemtime = SystemTime::from(now_datetime);
assert_eq!(now_datetime, now_systemtime);
}
#[test]
#[cfg(feature = "std")]
fn ord_std() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc(),
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 02 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 01 - 02).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 01 - 01).with_time(time!(1:00:00)).assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 01 - 01).with_time(time!(0:01:00)).assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 01 - 01).with_time(time!(0:00:01)).assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(
date!(2019 - 01 - 01)
.with_time(time!(0:00:00.001))
.assume_utc()
)
);
assert!(
date!(2020 - 01 - 01).midnight().assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 02 - 01).midnight().assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 02).midnight().assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).with_time(time!(1:00:00)).assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).with_time(time!(0:01:00)).assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01)
.with_time(time!(0:00:00.000_000_001))
.assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
}
#[test]
#[cfg(feature = "std")]
fn std_ord() {
assert_eq!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()),
date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2020 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 02 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 02).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 01).with_time(time!(1:00:00)).assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 01).with_time(time!(0:01:00)).assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 01)
.with_time(time!(0:00:00.000_000_001))
.assume_utc()
);
assert!(
SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 02 - 01).midnight().assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 02).midnight().assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).with_time(time!(1:00:00)).assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).with_time(time!(0:01:00)).assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).with_time(time!(0:00:01)).assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(
date!(2019 - 01 - 01)
.with_time(time!(0:00:00.001))
.assume_utc()
) > date!(2019 - 01 - 01).midnight().assume_utc()
);
}
#[test]
#[cfg(feature = "std")]
fn from_std() {
assert_eq!(
OffsetDateTime::from(SystemTime::UNIX_EPOCH),
OffsetDateTime::unix_epoch()
);
assert_eq!(
OffsetDateTime::from(SystemTime::UNIX_EPOCH - 1.std_days()),
OffsetDateTime::unix_epoch() - 1.days()
);
assert_eq!(
OffsetDateTime::from(SystemTime::UNIX_EPOCH + 1.std_days()),
OffsetDateTime::unix_epoch() + 1.days()
);
}
#[test]
#[cfg(feature = "std")]
fn to_std() {
assert_eq!(
SystemTime::from(OffsetDateTime::unix_epoch()),
SystemTime::UNIX_EPOCH
);
assert_eq!(
SystemTime::from(OffsetDateTime::unix_epoch() + 1.days()),
SystemTime::UNIX_EPOCH + 1.std_days()
);
assert_eq!(
SystemTime::from(OffsetDateTime::unix_epoch() - 1.days()),
SystemTime::UNIX_EPOCH - 1.std_days()
);
}
#[test]
#[cfg(feature = "std")]
fn display() {
assert_eq!(
date!(1970 - 01 - 01).midnight().assume_utc().to_string(),
String::from("1970-01-01 0:00 +0")
);
}
| {
assert_eq!(
date!(2019 - 01 - 02).midnight().assume_utc()
- date!(2019 - 01 - 01).midnight().assume_utc(),
1.days(),
);
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc()
- date!(2019 - 01 - 02).midnight().assume_utc(),
(-1).days(),
);
assert_eq!(
date!(2020 - 01 - 01).midnight().assume_utc()
- date!(2019 - 12 - 31).midnight().assume_utc(),
1.days(),
);
assert_eq!(
date!(2019 - 12 - 31).midnight().assume_utc()
- date!(2020 - 01 - 01).midnight().assume_utc(),
(-1).days(),
);
} |
incoming_dht_ops_workflow.rs | //! The workflow and queue consumer for DhtOp integration
use super::error::WorkflowResult;
use super::sys_validation_workflow::counterfeit_check;
use crate::core::queue_consumer::TriggerSender;
use holo_hash::DhtOpHash;
use holochain_sqlite::error::DatabaseResult;
use holochain_sqlite::prelude::*;
use holochain_state::prelude::*;
use holochain_types::dht_op::DhtOp;
use holochain_types::prelude::*;
use once_cell::sync::Lazy;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tracing::instrument;
#[cfg(test)]
mod test;
type InOpBatchSnd = tokio::sync::oneshot::Sender<WorkflowResult<()>>;
type InOpBatchRcv = tokio::sync::oneshot::Receiver<WorkflowResult<()>>;
struct InOpBatchEntry {
snd: InOpBatchSnd,
request_validation_receipt: bool,
ops: Vec<(DhtOpHash, DhtOp)>,
}
struct InOpBatch {
is_running: bool,
pending: Vec<InOpBatchEntry>,
}
impl Default for InOpBatch {
fn default() -> Self |
}
static IN_OP_BATCH: Lazy<parking_lot::Mutex<HashMap<DbKind, InOpBatch>>> =
Lazy::new(|| parking_lot::Mutex::new(HashMap::new()));
/// if result.0.is_none() -- we queued it to send later
/// if result.0.is_some() -- the batch should be run now
fn batch_check_insert(
kind: DbKind,
request_validation_receipt: bool,
ops: Vec<(DhtOpHash, DhtOp)>,
) -> (Option<Vec<InOpBatchEntry>>, InOpBatchRcv) {
let (snd, rcv) = tokio::sync::oneshot::channel();
let entry = InOpBatchEntry {
snd,
request_validation_receipt,
ops,
};
let mut lock = IN_OP_BATCH.lock();
let batch = lock.entry(kind).or_insert_with(InOpBatch::default);
if batch.is_running {
// there is already a batch running, just queue this
batch.pending.push(entry);
(None, rcv)
} else {
// no batch running, run this (and assert we never collect stragglers)
assert!(batch.pending.is_empty());
batch.is_running = true;
(Some(vec![entry]), rcv)
}
}
/// if result.is_none() -- we are done, end the loop for now
/// if result.is_some() -- we got more items to process
fn batch_check_end(kind: DbKind) -> Option<Vec<InOpBatchEntry>> {
let mut lock = IN_OP_BATCH.lock();
let batch = lock.entry(kind).or_insert_with(InOpBatch::default);
assert!(batch.is_running);
let out: Vec<InOpBatchEntry> = batch.pending.drain(..).collect();
if out.is_empty() {
// pending was empty, we can end the loop for now
batch.is_running = false;
None
} else {
// we have some more pending, continue the running loop
Some(out)
}
}
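// Illustrative driver for the two helpers above (a sketch; the transaction
// body is elided, names are from this module):
//
// let (mut maybe_batch, _rcv) = batch_check_insert(kind.clone(), false, ops);
// while let Some(entries) = maybe_batch {
//     // ... persist `entries` in one commit, answer each `entry.snd` ...
//     maybe_batch = batch_check_end(kind.clone());
// }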
fn batch_process_entry(
txn: &mut rusqlite::Transaction<'_>,
request_validation_receipt: bool,
ops: Vec<(DhtOpHash, DhtOp)>,
) -> WorkflowResult<()> {
// add incoming ops to the validation limbo
let mut to_pending = Vec::with_capacity(ops.len());
for (hash, op) in ops {
if !op_exists_inner(txn, &hash)? {
let op = DhtOpHashed::from_content_sync(op);
to_pending.push(op);
} else {
// Check if we should set receipt to send.
if request_validation_receipt {
set_send_receipt(txn, hash.clone())?;
}
}
}
add_to_pending(txn, to_pending, request_validation_receipt)?;
Ok(())
}
#[derive(Default, Clone)]
pub struct IncomingOpHashes(Arc<parking_lot::Mutex<HashSet<DhtOpHash>>>);
#[instrument(skip(vault, sys_validation_trigger, ops, incoming_op_hashes))]
pub async fn incoming_dht_ops_workflow(
vault: &EnvWrite,
incoming_op_hashes: Option<&IncomingOpHashes>,
sys_validation_trigger: TriggerSender,
mut ops: Vec<(holo_hash::DhtOpHash, holochain_types::dht_op::DhtOp)>,
request_validation_receipt: bool,
) -> WorkflowResult<()> {
let mut filter_ops = Vec::new();
let mut hashes_to_remove = if incoming_op_hashes.is_some() {
Vec::with_capacity(ops.len())
} else {
Vec::with_capacity(0)
};
// Filter out ops that are already being tracked, so we don't do duplicate work
if let Some(incoming_op_hashes) = &incoming_op_hashes {
let mut set = incoming_op_hashes.0.lock();
let mut o = Vec::with_capacity(ops.len());
for (hash, op) in ops {
if !set.contains(&hash) {
set.insert(hash.clone());
hashes_to_remove.push(hash.clone());
o.push((hash, op));
}
}
ops = o;
}
if ops.is_empty() {
return Ok(());
}
for (hash, op) in ops {
// It's cheaper to check if the op exists before trying
// to check the signature or open a write transaction.
if !op_exists(vault, hash.clone()).await? {
match should_keep(&op).await {
Ok(()) => filter_ops.push((hash, op)),
Err(e) => {
tracing::warn!(
msg = "Dropping op because it failed counterfeit checks",
?op
);
return Err(e);
}
}
}
}
let kind = vault.kind().clone();
let (mut maybe_batch, rcv) =
batch_check_insert(kind.clone(), request_validation_receipt, filter_ops);
let vault = vault.clone();
if maybe_batch.is_some() {
// there was no already running batch task, so spawn one:
tokio::task::spawn(async move {
while let Some(entries) = maybe_batch {
let senders = Arc::new(parking_lot::Mutex::new(Vec::new()));
let senders2 = senders.clone();
if let Err(err) = vault
.async_commit(move |txn| {
for entry in entries {
let InOpBatchEntry {
snd,
request_validation_receipt,
ops,
} = entry;
let res = batch_process_entry(txn, request_validation_receipt, ops);
// we can't send the results here...
// we haven't committed
senders2.lock().push((snd, res));
}
WorkflowResult::Ok(())
})
.await
{
tracing::error!(?err, "incoming_dht_ops_workflow error");
}
for (snd, res) in senders.lock().drain(..) {
let _ = snd.send(res);
}
// trigger validation of queued ops
sys_validation_trigger.trigger();
maybe_batch = batch_check_end(kind.clone());
}
});
}
let r = rcv
.await
.map_err(|_| super::error::WorkflowError::RecvError)?;
if let Some(incoming_op_hashes) = &incoming_op_hashes {
let mut set = incoming_op_hashes.0.lock();
for hash in hashes_to_remove {
set.remove(&hash);
}
}
r
}
#[instrument(skip(op))]
/// If this op fails the counterfeit check it should be dropped
async fn should_keep(op: &DhtOp) -> WorkflowResult<()> {
let header = op.header();
let signature = op.signature();
Ok(counterfeit_check(signature, &header).await?)
}
fn add_to_pending(
txn: &mut rusqlite::Transaction<'_>,
ops: Vec<DhtOpHashed>,
request_validation_receipt: bool,
) -> StateMutationResult<()> {
for op in ops {
let op_hash = op.as_hash().clone();
insert_op(txn, op, false)?;
set_require_receipt(txn, op_hash, request_validation_receipt)?;
}
StateMutationResult::Ok(())
}
fn op_exists_inner(txn: &rusqlite::Transaction<'_>, hash: &DhtOpHash) -> DatabaseResult<bool> {
DatabaseResult::Ok(txn.query_row(
"
SELECT EXISTS(
SELECT
1
FROM DhtOp
WHERE
DhtOp.hash = :hash
)
",
named_params! {
":hash": hash,
},
|row| row.get(0),
)?)
}
pub async fn op_exists(vault: &EnvWrite, hash: DhtOpHash) -> DatabaseResult<bool> {
vault
.async_reader(move |txn| op_exists_inner(&txn, &hash))
.await
}
fn set_send_receipt(
txn: &mut rusqlite::Transaction<'_>,
hash: DhtOpHash,
) -> StateMutationResult<()> {
set_require_receipt(txn, hash, true)?;
StateMutationResult::Ok(())
}
| {
Self {
is_running: false,
pending: Vec::new(),
}
} |
TextBolder.tsx | /* GENERATED FILE */ |
import { IconProps } from '../lib';
function TextBolder(props: IconProps) {
return (
<Svg
id="Raw"
viewBox="0 0 256 256"
width={props.size}
height={props.size}
{...props}
>
<Rect width={256} height={256} fill="none" />
<Path
d="M64,120h88a40,40,0,0,1,0,80l-88.00586-.00488v-152L140,48a36,36,0,0,1,0,72"
fill="none"
stroke={props.color}
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth={8}
/>
</Svg>
);
}
export default TextBolder; | import * as React from 'react';
import Svg, { Rect, Path } from 'react-native-svg'; |
utils.py | from functools import wraps
from os import environ
from backends.exceptions import ErrorException
def wrap_exception(exception_type, error_message):
def _typed_exception_wrapper(func):
@wraps(func)
def _adapt_exception_types(*args, **kwargs):
try:
return func(*args, **kwargs)
except exception_type as ex:
raise ErrorException(error_message) from ex
return _adapt_exception_types
return _typed_exception_wrapper
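# Illustrative use of wrap_exception (a sketch; the wrapped function and
# exception type are assumptions, not part of this module):
#
# @wrap_exception(OSError, 'Could not reach the storage backend')
# def upload(path):
#     ...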
def getenv_required(key):
try:
return environ[key]
except KeyError:
raise ErrorException(
'Required environment variable %s not set' % key)
def getenv_int(key, default): | value = environ[key]
except KeyError:
return default
try:
return int(value)
except ValueError:
raise ErrorException(
'Environment variable %s with value %s '
'is not convertible to int' % (key, value)) | try: |
hashmap2.rs | // hashmap2.rs
// A basket of fruits in the form of a hash map is given. The key
// represents the name of the fruit and the value represents how many
// of that particular fruit is in the basket. You have to put *MORE
// THAN 11* fruits in the basket. Three types of fruits - Apple (4),
// Mango (2) and Lychee (5) are already given in the basket. You are
// not allowed to insert any more of these fruits!
//
// Make me pass the tests!
//
// Execute the command `rustlings hint hashmap2` if you need
// hints.
use std::collections::HashMap;
#[derive(Hash, PartialEq, Eq)]
enum Fruit {
Apple,
Banana,
Mango,
Lychee,
Pineapple,
}
fn fruit_basket(basket: &mut HashMap<Fruit, u32>) {
let fruit_kinds = vec![
Fruit::Apple,
Fruit::Banana,
Fruit::Mango,
Fruit::Lychee,
Fruit::Pineapple,
];
for fruit in fruit_kinds {
if !basket.contains_key(&fruit) {
basket.insert(fruit, 2);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn get_fruit_basket() -> HashMap<Fruit, u32> {
let mut basket = HashMap::<Fruit, u32>::new();
basket.insert(Fruit::Apple, 4);
basket.insert(Fruit::Mango, 2);
basket.insert(Fruit::Lychee, 5);
basket
}
#[test]
fn test_given_fruits_are_not_modified() {
let mut basket = get_fruit_basket();
fruit_basket(&mut basket);
assert_eq!(*basket.get(&Fruit::Apple).unwrap(), 4);
assert_eq!(*basket.get(&Fruit::Mango).unwrap(), 2);
assert_eq!(*basket.get(&Fruit::Lychee).unwrap(), 5);
}
#[test]
fn at_least_five_types_of_fruits() |
#[test]
fn greater_than_eleven_fruits() {
let mut basket = get_fruit_basket();
fruit_basket(&mut basket);
let count = basket.values().sum::<u32>();
assert!(count > 11);
}
}
| {
let mut basket = get_fruit_basket();
fruit_basket(&mut basket);
let count_fruit_kinds = basket.len();
assert!(count_fruit_kinds >= 5);
} |
test_jcampdx.py | from brukerapi.jcampdx import JCAMPDX
import numpy as np
from pathlib import Path
import pytest
@pytest.mark.skip(reason="in progress")
def test_jcampdx(test_jcampdx_data):
| j = JCAMPDX(Path(test_jcampdx_data[1]) / test_jcampdx_data[0]['path'])
for key, ref in test_jcampdx_data[0]['parameters'].items():
parameter_test = j.get_parameter(key)
size_test= parameter_test.size
value_test= parameter_test.value
type_test = value_test.__class__
value_ref = ref['value']
size_ref = ref['size']
type_ref = ref['type']
#test SIZE
if size_ref == 'None':
size_ref = None
if isinstance(size_ref, list):
size_ref = tuple(size_ref)
elif isinstance(size_ref, int):
size_ref = (size_ref,)
assert size_ref == size_test
#test TYPE
assert type_ref == type_test.__name__
#test VALUE
if isinstance(value_test, np.ndarray):
value_ref = np.array(value_ref)
assert np.array_equal(value_ref, value_test)
elif isinstance(value_test, list):
assert value_test == value_ref
else:
assert value_ref == value_test |
|
message_test.go | package memorycache
import (
. "gopkg.in/check.v1"
"testing"
)
func | (t *testing.T) {
TestingT(t)
}
type CreateMessageTestsSuite struct{}
var _ = Suite(&CreateMessageTestsSuite{})
func (s *CreateMessageTestsSuite) Test_Res(c *C) {
//c.Skip("Not now")
b := Res{}
c.Assert(b, NotNil)
}
func (s *CreateMessageTestsSuite) Test_Request(c *C) {
//c.Skip("Not now")
b := Request{}
c.Assert(b, NotNil)
}
| TestCreateMessage |
geo-code-result.model.ts | export class GeoCodeResult {
constructor (public address: string){}
} |
||
loom_watch.rs | use crate::sync::watch;
use loom::future::block_on;
use loom::thread;
#[test]
fn smoke() {
loom::model(|| {
let (tx, mut rx) = watch::channel(1);
let th = thread::spawn(move || {
tx.send(2).unwrap(); | assert_eq!(*rx.borrow(), 2);
th.join().unwrap();
})
} | });
block_on(rx.changed()).unwrap(); |
unit.pipe.ts | import {Pipe, PipeTransform} from '@angular/core';
declare const math: any;
/*
* Convert a numeric value from one unit to another
* Takes a source unit and an optional target unit.
* Usage:
* value | unit:unitFrom:unitTo
* Example:
* {{ 2000000 | unit:Wh:MWh}} MWh
* will display: 2 MWh
*/
@Pipe({name: 'unit'})
export class | implements PipeTransform {
transform(value: number, unitFrom: String, unitTo?: String): any {
if (value) {
const metric = unitTo ? math.eval(`${value} ${unitFrom} to ${unitTo}`) : math.unit(value, unitFrom);
return unitFrom === 'VAR' ? metric.value.im : metric.toNumber();
} else {
return '';
}
}
}
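// Illustrative direct call (a sketch; assumes the global `math` declared
// above is provided by math.js at runtime):
//
// const pipe = new UnitPipe();
// pipe.transform(2000000, 'Wh', 'MWh'); // 2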
| UnitPipe |
entry_api.rs | use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
use rocket::response::status::NotFound;
use rocket::serde::json::Json;
use rocket::serde::{Deserialize, Serialize};
use rocket::State;
use rocket::{get, post};
use crate::helper::exec_wrapper::meili_exec::feed_document_async;
use quake_core::entry::entry_file::EntryFile;
use quake_core::entry::entry_paths::EntryPaths;
use quake_core::entry::EntryDefines;
use quake_core::helper::file_filter;
use quake_core::usecases::entry_usecases;
use quake_core::QuakeConfig;
use crate::server::ApiError;
#[get("/defines")]
pub(crate) async fn get_entry_defines(conf: &State<QuakeConfig>) -> String {
let path = PathBuf::from(&conf.workspace);
let defines = EntryDefines::from_path(&path.join(EntryPaths::entries_define()));
serde_json::to_string(&defines).unwrap()
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(crate = "rocket::serde")]
struct EntryResponse {
content: String,
}
#[post("/<entry_type>?<text>")]
pub(crate) async fn create_entry(
entry_type: &str,
text: &str,
config: &State<QuakeConfig>,
) -> Result<Json<EntryFile>, NotFound<Json<ApiError>>> {
let workspace = config.workspace.to_string();
return match entry_usecases::create_entry(&workspace, entry_type, text) {
Ok((_path, file)) => {
let _ = feed_entry(&config.search_url, entry_type, &file);
Ok(Json(file))
}
Err(err) => Err(NotFound(Json(ApiError {
msg: err.to_string(),
}))),
};
}
#[get("/<entry_type>/<id>")]
pub(crate) async fn | (
entry_type: &str,
id: usize,
config: &State<QuakeConfig>,
) -> Result<Json<EntryFile>, NotFound<Json<ApiError>>> {
let file_path = entry_by_id(entry_type, id, config)?;
let str = fs::read_to_string(file_path).expect("cannot read entry type");
let file = EntryFile::from(str.as_str(), id).unwrap();
Ok(Json(file))
}
pub fn entry_by_id(
entry_type: &str,
id: usize,
config: &State<QuakeConfig>,
) -> Result<PathBuf, NotFound<Json<ApiError>>> {
let base_path = PathBuf::from(&config.workspace).join(entry_type);
let entries = file_filter::filter_by_prefix(base_path, EntryFile::file_prefix(id));
if entries.is_empty() {
return Err(NotFound(Json(ApiError {
msg: "file not found".to_string(),
})));
}
Ok(entries[0].clone())
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(crate = "rocket::serde")]
pub struct EntryUpdate {
properties: HashMap<String, String>,
}
#[post("/<entry_type>/<id>", data = "<entry>")]
pub(crate) async fn update_entry(
entry_type: &str,
id: usize,
entry: Json<EntryUpdate>,
config: &State<QuakeConfig>,
) -> Result<Json<EntryFile>, NotFound<Json<ApiError>>> {
let path = PathBuf::from(&config.workspace).join(entry_type);
return match entry_usecases::update_entry_properties(path, entry_type, id, &entry.properties) {
Ok(file) => {
let _ = feed_entry(&config.search_url, entry_type, &file);
Ok(Json(file))
}
Err(err) => Err(NotFound(Json(ApiError {
msg: err.to_string(),
}))),
};
}
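/// Push the entry document to the search index; the result is ignored so an
/// indexing failure does not fail the HTTP request.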
pub fn feed_entry(server_url: &str, index_name: &str, content: &EntryFile) {
let _ = feed_document_async(server_url, index_name, content);
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use std::fs;
use std::io::Read;
use rocket::form::validate::Contains;
use rocket::http::Status;
use rocket::local::blocking::Client;
use quake_core::entry::entry_paths::EntryPaths;
use crate::quake_rocket;
use crate::server::entry_api::EntryUpdate;
#[test]
fn crud_for_entry() {
// create entry
let client = Client::tracked(quake_rocket()).expect("valid rocket instance");
let response = client.post("/entry/test_quake?text=demo").dispatch();
assert_eq!(response.status(), Status::Ok);
// update entry date
let created_time = "2021-12-13 20:45:51";
let req = create_update_req(created_time);
let string = serde_json::to_string(&req).unwrap();
let response = client.post("/entry/test_quake/1").body(string).dispatch();
assert_eq!(response.status(), Status::Ok);
        // assert that the entry time was updated
let mut response = client.get("/entry/test_quake/1").dispatch();
let mut res = "".to_string();
let _ = response.read_to_string(&mut res);
assert_eq!(response.status(), Status::Ok);
assert!(res.contains(created_time));
assert!(res.contains("\"id\":1"));
assert!(res.contains("\"title\":\"demo\""));
let paths = EntryPaths::init(&"examples".to_string(), &"test_quake".to_string());
fs::remove_dir_all(paths.entry_path).unwrap();
}
fn create_update_req(time: &str) -> EntryUpdate {
let mut fields: HashMap<String, String> = HashMap::new();
fields.insert("created_date".to_string(), time.to_string());
fields.insert("updated_date".to_string(), time.to_string());
EntryUpdate { properties: fields }
}
}
| get_entry |
issue_tracked_time.go | // Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"fmt"
"net/http"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/models/unit"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/convert"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/web"
"code.gitea.io/gitea/routers/api/v1/utils"
)
// ListTrackedTimes lists all the tracked times of an issue
func ListTrackedTimes(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/issues/{index}/times issue issueTrackedTimes
// ---
// summary: List an issue's tracked times
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: index
// in: path
// description: index of the issue
// type: integer
// format: int64
// required: true
// - name: user
// in: query
// description: optional filter by user (available for issue managers)
// type: string
// - name: since
// in: query
// description: Only show times updated after the given time. This is a timestamp in RFC 3339 format
// type: string
// format: date-time
// - name: before
// in: query
// description: Only show times updated before the given time. This is a timestamp in RFC 3339 format
// type: string
// format: date-time
// - name: page
// in: query
// description: page number of results to return (1-based)
// type: integer
// - name: limit
// in: query
// description: page size of results
// type: integer
// responses:
// "200":
// "$ref": "#/responses/TrackedTimeList"
// "404":
// "$ref": "#/responses/notFound"
if !ctx.Repo.Repository.IsTimetrackerEnabled() {
ctx.NotFound("Timetracker is disabled")
return
}
issue, err := models.GetIssueByIndex(ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
if err != nil {
if models.IsErrIssueNotExist(err) {
ctx.NotFound(err)
} else {
ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
}
return
}
opts := &models.FindTrackedTimesOptions{
ListOptions: utils.GetListOptions(ctx),
RepositoryID: ctx.Repo.Repository.ID,
IssueID: issue.ID,
}
qUser := ctx.FormTrim("user")
if qUser != "" {
user, err := models.GetUserByName(qUser)
		if models.IsErrUserNotExist(err) {
			ctx.Error(http.StatusNotFound, "User does not exist", err)
			return
		} else if err != nil {
ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
return
}
opts.UserID = user.ID
}
if opts.CreatedBeforeUnix, opts.CreatedAfterUnix, err = utils.GetQueryBeforeSince(ctx); err != nil {
ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
return
}
cantSetUser := !ctx.User.IsAdmin &&
opts.UserID != ctx.User.ID &&
!ctx.IsUserRepoWriter([]unit.Type{unit.TypeIssues})
if cantSetUser {
if opts.UserID == 0 {
opts.UserID = ctx.User.ID
} else {
ctx.Error(http.StatusForbidden, "", fmt.Errorf("query by user not allowed; not enough rights"))
return
}
}
count, err := models.CountTrackedTimes(opts)
if err != nil {
ctx.InternalServerError(err)
return
}
trackedTimes, err := models.GetTrackedTimes(opts)
if err != nil {
ctx.Error(http.StatusInternalServerError, "GetTrackedTimes", err)
return
}
if err = trackedTimes.LoadAttributes(); err != nil {
ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
return
}
ctx.SetTotalCountHeader(count)
ctx.JSON(http.StatusOK, convert.ToTrackedTimeList(trackedTimes))
}
// AddTime adds time manually to the given issue
func AddTime(ctx *context.APIContext) {
	// swagger:operation POST /repos/{owner}/{repo}/issues/{index}/times issue issueAddTime
	// ---
	// summary: Add tracked time to an issue
// consumes:
// - application/json
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: index
// in: path
// description: index of the issue
// type: integer
// format: int64
// required: true
// - name: body
// in: body
// schema:
// "$ref": "#/definitions/AddTimeOption"
// responses:
// "200":
// "$ref": "#/responses/TrackedTime"
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
form := web.GetForm(ctx).(*api.AddTimeOption)
issue, err := models.GetIssueByIndex(ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
if err != nil {
if models.IsErrIssueNotExist(err) {
ctx.NotFound(err)
} else {
ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
}
return
}
if !ctx.Repo.CanUseTimetracker(issue, ctx.User) {
if !ctx.Repo.Repository.IsTimetrackerEnabled() {
ctx.Error(http.StatusBadRequest, "", "time tracking disabled")
return
}
ctx.Status(http.StatusForbidden)
return
}
user := ctx.User
if form.User != "" {
if (ctx.IsUserRepoAdmin() && ctx.User.Name != form.User) || ctx.User.IsAdmin {
			// Allow only repo admins and site admins to add time on behalf of another user.
			user, err = models.GetUserByName(form.User)
			if err != nil {
				ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
				return
			}
}
}
created := time.Time{}
if !form.Created.IsZero() {
created = form.Created
}
trackedTime, err := models.AddTime(user, issue, form.Time, created)
if err != nil {
ctx.Error(http.StatusInternalServerError, "AddTime", err)
return
}
if err = trackedTime.LoadAttributes(); err != nil {
ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
return
}
ctx.JSON(http.StatusOK, convert.ToTrackedTime(trackedTime))
}
// ResetIssueTime resets the manually tracked time for the given issue
func ResetIssueTime(ctx *context.APIContext) {
	// swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/times issue issueResetTime
// ---
// summary: Reset a tracked time of an issue
// consumes:
// - application/json
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: index
// in: path
// description: index of the issue to add tracked time to
// type: integer
// format: int64
// required: true
// responses:
// "204":
// "$ref": "#/responses/empty"
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
issue, err := models.GetIssueByIndex(ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
if err != nil {
if models.IsErrIssueNotExist(err) {
ctx.NotFound(err)
} else {
ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
}
return
}
if !ctx.Repo.CanUseTimetracker(issue, ctx.User) {
if !ctx.Repo.Repository.IsTimetrackerEnabled() {
ctx.JSON(http.StatusBadRequest, struct{ Message string }{Message: "time tracking disabled"})
return
}
ctx.Status(http.StatusForbidden)
return
}
err = models.DeleteIssueUserTimes(issue, ctx.User)
if err != nil {
if models.IsErrNotExist(err) {
ctx.Error(http.StatusNotFound, "DeleteIssueUserTimes", err)
} else {
ctx.Error(http.StatusInternalServerError, "DeleteIssueUserTimes", err)
}
return
}
	ctx.Status(http.StatusNoContent)
}
// DeleteTime deletes a specific tracked time by its id
func DeleteTime(ctx *context.APIContext) {
	// swagger:operation DELETE /repos/{owner}/{repo}/issues/{index}/times/{id} issue issueDeleteTime
// ---
// summary: Delete specific tracked time
// consumes:
// - application/json
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: index
// in: path
// description: index of the issue
// type: integer
// format: int64
// required: true
// - name: id
// in: path
// description: id of time to delete
// type: integer
// format: int64
// required: true
// responses:
// "204":
// "$ref": "#/responses/empty"
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
issue, err := models.GetIssueByIndex(ctx.Repo.Repository.ID, ctx.ParamsInt64(":index"))
if err != nil {
if models.IsErrIssueNotExist(err) {
ctx.NotFound(err)
} else {
ctx.Error(http.StatusInternalServerError, "GetIssueByIndex", err)
}
return
}
if !ctx.Repo.CanUseTimetracker(issue, ctx.User) {
if !ctx.Repo.Repository.IsTimetrackerEnabled() {
ctx.JSON(http.StatusBadRequest, struct{ Message string }{Message: "time tracking disabled"})
return
}
ctx.Status(http.StatusForbidden)
return
}
time, err := models.GetTrackedTimeByID(ctx.ParamsInt64(":id"))
if err != nil {
if models.IsErrNotExist(err) {
ctx.NotFound(err)
return
}
ctx.Error(http.StatusInternalServerError, "GetTrackedTimeByID", err)
return
}
if time.Deleted {
ctx.NotFound(fmt.Errorf("tracked time [%d] already deleted", time.ID))
return
}
if !ctx.User.IsAdmin && time.UserID != ctx.User.ID {
		// Only admins and the user themselves may delete their tracked time
ctx.Status(http.StatusForbidden)
return
}
err = models.DeleteTime(time)
if err != nil {
ctx.Error(http.StatusInternalServerError, "DeleteTime", err)
return
}
ctx.Status(http.StatusNoContent)
}
// ListTrackedTimesByUser lists all tracked times of the user
func ListTrackedTimesByUser(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/times/{user} repository userTrackedTimes
// ---
// summary: List a user's tracked times in a repo
// deprecated: true
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: user
// in: path
// description: username of user
// type: string
// required: true
// responses:
// "200":
// "$ref": "#/responses/TrackedTimeList"
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
if !ctx.Repo.Repository.IsTimetrackerEnabled() {
ctx.Error(http.StatusBadRequest, "", "time tracking disabled")
return
}
user, err := models.GetUserByName(ctx.Params(":timetrackingusername"))
if err != nil {
if models.IsErrUserNotExist(err) {
ctx.NotFound(err)
} else {
ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
}
return
}
if user == nil {
ctx.NotFound()
return
}
if !ctx.IsUserRepoAdmin() && !ctx.User.IsAdmin && ctx.User.ID != user.ID {
ctx.Error(http.StatusForbidden, "", fmt.Errorf("query by user not allowed; not enough rights"))
return
}
opts := &models.FindTrackedTimesOptions{
UserID: user.ID,
RepositoryID: ctx.Repo.Repository.ID,
}
trackedTimes, err := models.GetTrackedTimes(opts)
if err != nil {
ctx.Error(http.StatusInternalServerError, "GetTrackedTimes", err)
return
}
if err = trackedTimes.LoadAttributes(); err != nil {
ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
return
}
ctx.JSON(http.StatusOK, convert.ToTrackedTimeList(trackedTimes))
}
// ListTrackedTimesByRepository lists all tracked times of the repository
func | (ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/times repository repoTrackedTimes
// ---
// summary: List a repo's tracked times
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: user
// in: query
// description: optional filter by user (available for issue managers)
// type: string
// - name: since
// in: query
// description: Only show times updated after the given time. This is a timestamp in RFC 3339 format
// type: string
// format: date-time
// - name: before
// in: query
// description: Only show times updated before the given time. This is a timestamp in RFC 3339 format
// type: string
// format: date-time
// - name: page
// in: query
// description: page number of results to return (1-based)
// type: integer
// - name: limit
// in: query
// description: page size of results
// type: integer
// responses:
// "200":
// "$ref": "#/responses/TrackedTimeList"
// "400":
// "$ref": "#/responses/error"
// "403":
// "$ref": "#/responses/forbidden"
if !ctx.Repo.Repository.IsTimetrackerEnabled() {
ctx.Error(http.StatusBadRequest, "", "time tracking disabled")
return
}
opts := &models.FindTrackedTimesOptions{
ListOptions: utils.GetListOptions(ctx),
RepositoryID: ctx.Repo.Repository.ID,
}
// Filters
qUser := ctx.FormTrim("user")
if qUser != "" {
user, err := models.GetUserByName(qUser)
		if models.IsErrUserNotExist(err) {
			ctx.Error(http.StatusNotFound, "User does not exist", err)
			return
		} else if err != nil {
ctx.Error(http.StatusInternalServerError, "GetUserByName", err)
return
}
opts.UserID = user.ID
}
var err error
if opts.CreatedBeforeUnix, opts.CreatedAfterUnix, err = utils.GetQueryBeforeSince(ctx); err != nil {
ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
return
}
cantSetUser := !ctx.User.IsAdmin &&
opts.UserID != ctx.User.ID &&
!ctx.IsUserRepoWriter([]unit.Type{unit.TypeIssues})
if cantSetUser {
if opts.UserID == 0 {
opts.UserID = ctx.User.ID
} else {
ctx.Error(http.StatusForbidden, "", fmt.Errorf("query by user not allowed; not enough rights"))
return
}
}
count, err := models.CountTrackedTimes(opts)
if err != nil {
ctx.InternalServerError(err)
return
}
trackedTimes, err := models.GetTrackedTimes(opts)
if err != nil {
ctx.Error(http.StatusInternalServerError, "GetTrackedTimes", err)
return
}
if err = trackedTimes.LoadAttributes(); err != nil {
ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
return
}
ctx.SetTotalCountHeader(count)
ctx.JSON(http.StatusOK, convert.ToTrackedTimeList(trackedTimes))
}
// ListMyTrackedTimes lists all tracked times of the current user
func ListMyTrackedTimes(ctx *context.APIContext) {
// swagger:operation GET /user/times user userCurrentTrackedTimes
// ---
// summary: List the current user's tracked times
// parameters:
// - name: page
// in: query
// description: page number of results to return (1-based)
// type: integer
// - name: limit
// in: query
// description: page size of results
// type: integer
// produces:
// - application/json
// parameters:
// - name: since
// in: query
// description: Only show times updated after the given time. This is a timestamp in RFC 3339 format
// type: string
// format: date-time
// - name: before
// in: query
// description: Only show times updated before the given time. This is a timestamp in RFC 3339 format
// type: string
// format: date-time
// responses:
// "200":
// "$ref": "#/responses/TrackedTimeList"
opts := &models.FindTrackedTimesOptions{
ListOptions: utils.GetListOptions(ctx),
UserID: ctx.User.ID,
}
var err error
if opts.CreatedBeforeUnix, opts.CreatedAfterUnix, err = utils.GetQueryBeforeSince(ctx); err != nil {
ctx.Error(http.StatusUnprocessableEntity, "GetQueryBeforeSince", err)
return
}
count, err := models.CountTrackedTimes(opts)
if err != nil {
ctx.InternalServerError(err)
return
}
trackedTimes, err := models.GetTrackedTimes(opts)
if err != nil {
ctx.Error(http.StatusInternalServerError, "GetTrackedTimesByUser", err)
return
}
if err = trackedTimes.LoadAttributes(); err != nil {
ctx.Error(http.StatusInternalServerError, "LoadAttributes", err)
return
}
ctx.SetTotalCountHeader(count)
ctx.JSON(http.StatusOK, convert.ToTrackedTimeList(trackedTimes))
}
| ListTrackedTimesByRepository |
container.go | package core
import (
"database/sql"
"io/ioutil"
"os"
"gopkg.in/src-d/core-retrieval.v0/model"
"gopkg.in/src-d/core-retrieval.v0/repository"
"gopkg.in/src-d/framework.v0/configurable"
"gopkg.in/src-d/framework.v0/database"
"gopkg.in/src-d/framework.v0/lock"
"gopkg.in/src-d/framework.v0/queue"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-billy.v4/osfs"
)
const transactionerLocalDir = "transactioner"
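// containerConfig holds the environment-driven settings used to construct
// the services exposed by this container.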
type containerConfig struct {
configurable.BasicConfiguration
TempDir string `default:"/tmp/sourced" split_words:"true"`
CleanTempDir bool `default:"false" split_words:"true"`
Broker string `default:"amqp://localhost:5672"`
RootRepositoriesDir string `default:"/tmp/root-repositories" split_words:"true"`
RootRepositoriesTempDir string `default:"/tmp/root-repositories-dot-copy" split_words:"true"`
Locking string `default:"local:"`
HDFS string `default:""`
	BucketSize              int    `default:"0"`
}
var config = &containerConfig{}
func init() {
configurable.InitConfig(config)
}
var container struct {
Broker queue.Broker
Database *sql.DB
ModelRepositoryStore *model.RepositoryStore
ModelMentionStore *model.MentionStore
RootedTransactioner repository.RootedTransactioner
TempDirFilesystem billy.Filesystem
Locking lock.Service
}
// Broker returns a queue.Broker for the default queue system.
func Broker() queue.Broker {
if container.Broker == nil {
b, err := queue.NewBroker(config.Broker)
if err != nil {
panic(err)
}
container.Broker = b
}
return container.Broker
}
// Database returns a sql.DB for the default database. If it is not possible to
// connect to the database, this function will panic. Multiple calls will always
// return the same instance.
func Database() *sql.DB {
if container.Database == nil {
container.Database = database.Must(database.Default())
}
return container.Database
}
// ModelMentionStore returns the default *model.ModelMentionStore, using the
// default database. If it is not possible to connect to the database, this
// function will panic. Multiple calls will always return the same instance.
func ModelMentionStore() *model.MentionStore {
if container.ModelMentionStore == nil {
container.ModelMentionStore = model.NewMentionStore(Database())
}
return container.ModelMentionStore
}
// ModelRepositoryStore returns the default *model.RepositoryStore, using the
// default database. If it is not possible to connect to the database, this
// function will panic. Multiple calls will always return the same instance.
func ModelRepositoryStore() *model.RepositoryStore |
// TemporaryFilesystem returns a billy.Filesystem that can be used to store
// temporary files. This directory is dedicated to the running application.
func TemporaryFilesystem() billy.Filesystem {
if container.TempDirFilesystem == nil {
if config.CleanTempDir {
os.RemoveAll(config.TempDir)
}
if err := os.MkdirAll(config.TempDir, os.FileMode(0755)); err != nil {
panic(err)
}
dir, err := ioutil.TempDir(config.TempDir, "")
if err != nil {
panic(err)
}
container.TempDirFilesystem = osfs.New(dir)
}
return container.TempDirFilesystem
}
// Locking returns a lock.Service that can be used to acquire namespaced locks.
func Locking() lock.Service {
if container.Locking == nil {
service, err := lock.New(config.Locking)
if err != nil {
panic(err)
}
container.Locking = service
}
return container.Locking
}
// RootedTransactioner returns the default RootedTransactioner instance,
// using the default RootRepositories directory. The local filesystem used to
// create the transactioner is the default TemporaryFilesystem from core container.
func RootedTransactioner() repository.RootedTransactioner {
if container.RootedTransactioner == nil {
tmpFs, err := TemporaryFilesystem().Chroot(transactionerLocalDir)
if err != nil {
panic(err)
}
var remote repository.Fs
if config.HDFS == "" {
remote = repository.NewLocalFs(osfs.New(config.RootRepositoriesDir))
} else {
remote = repository.NewHDFSFs(
config.HDFS,
config.RootRepositoriesDir,
config.RootRepositoriesTempDir,
)
}
container.RootedTransactioner =
repository.NewSivaRootedTransactioner(
repository.NewCopier(
tmpFs,
remote,
config.BucketSize,
),
)
}
return container.RootedTransactioner
}
| {
if container.ModelRepositoryStore == nil {
container.ModelRepositoryStore = model.NewRepositoryStore(Database())
}
return container.ModelRepositoryStore
} |
privateendpointconnections.go | package synapse
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// PrivateEndpointConnectionsClient is the azure Synapse Analytics Management Client
type PrivateEndpointConnectionsClient struct {
BaseClient
}
// NewPrivateEndpointConnectionsClient creates an instance of the PrivateEndpointConnectionsClient client.
func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient {
return NewPrivateEndpointConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewPrivateEndpointConnectionsClientWithBaseURI creates an instance of the PrivateEndpointConnectionsClient client
// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
// clouds, Azure stack).
func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient {
return PrivateEndpointConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// Create approves or rejects a private endpoint connection.
// Parameters:
// request - request body of private endpoint connection to create.
// resourceGroupName - the name of the resource group. The name is case insensitive.
// workspaceName - the name of the workspace.
// privateEndpointConnectionName - the name of the private endpoint connection.
func (client PrivateEndpointConnectionsClient) Create(ctx context.Context, request PrivateEndpointConnection, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) (result PrivateEndpointConnectionsCreateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Create")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: client.SubscriptionID,
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("synapse.PrivateEndpointConnectionsClient", "Create", err.Error())
}
req, err := client.CreatePreparer(ctx, request, resourceGroupName, workspaceName, privateEndpointConnectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "Create", nil, "Failure preparing request")
return
}
result, err = client.CreateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "Create", nil, "Failure sending request")
return
}
return
}
// CreatePreparer prepares the Create request.
func (client PrivateEndpointConnectionsClient) CreatePreparer(ctx context.Context, request PrivateEndpointConnection, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"workspaceName": autorest.Encode("path", workspaceName),
}
const APIVersion = "2019-06-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
autorest.WithJSON(request),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateSender sends the Create request. The method will close the
// http.Response Body if it receives an error.
func (client PrivateEndpointConnectionsClient) CreateSender(req *http.Request) (future PrivateEndpointConnectionsCreateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client PrivateEndpointConnectionsClient) (pec PrivateEndpointConnection, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsCreateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("synapse.PrivateEndpointConnectionsCreateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
pec.Response.Response, err = future.GetResult(sender)
if pec.Response.Response == nil && err == nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsCreateFuture", "Result", nil, "received nil response and error")
}
if err == nil && pec.Response.Response.StatusCode != http.StatusNoContent {
pec, err = client.CreateResponder(pec.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsCreateFuture", "Result", pec.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// CreateResponder handles the response to the Create request. The method always
// closes the http.Response Body.
func (client PrivateEndpointConnectionsClient) CreateResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes a private endpoint connection.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
// workspaceName - the name of the workspace.
// privateEndpointConnectionName - the name of the private endpoint connection.
func (client PrivateEndpointConnectionsClient) Delete(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) (result PrivateEndpointConnectionsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Delete")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: client.SubscriptionID,
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("synapse.PrivateEndpointConnectionsClient", "Delete", err.Error())
}
req, err := client.DeletePreparer(ctx, resourceGroupName, workspaceName, privateEndpointConnectionName)
if err != nil |
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "Delete", nil, "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client PrivateEndpointConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"workspaceName": autorest.Encode("path", workspaceName),
}
const APIVersion = "2019-06-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client PrivateEndpointConnectionsClient) DeleteSender(req *http.Request) (future PrivateEndpointConnectionsDeleteFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = func(client PrivateEndpointConnectionsClient) (or OperationResource, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("synapse.PrivateEndpointConnectionsDeleteFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
or.Response.Response, err = future.GetResult(sender)
if or.Response.Response == nil && err == nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsDeleteFuture", "Result", nil, "received nil response and error")
}
if err == nil && or.Response.Response.StatusCode != http.StatusNoContent {
or, err = client.DeleteResponder(or.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsDeleteFuture", "Result", or.Response.Response, "Failure responding to request")
}
}
return
}
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client PrivateEndpointConnectionsClient) DeleteResponder(resp *http.Response) (result OperationResource, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Get gets a private endpoint connection.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
// workspaceName - the name of the workspace.
// privateEndpointConnectionName - the name of the private endpoint connection.
func (client PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: client.SubscriptionID,
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("synapse.PrivateEndpointConnectionsClient", "Get", err.Error())
}
req, err := client.GetPreparer(ctx, resourceGroupName, workspaceName, privateEndpointConnectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client PrivateEndpointConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, workspaceName string, privateEndpointConnectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"workspaceName": autorest.Encode("path", workspaceName),
}
const APIVersion = "2019-06-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client PrivateEndpointConnectionsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client PrivateEndpointConnectionsClient) GetResponder(resp *http.Response) (result PrivateEndpointConnection, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List lists the private endpoint connections in a workspace.
// Parameters:
// resourceGroupName - the name of the resource group. The name is case insensitive.
// workspaceName - the name of the workspace.
func (client PrivateEndpointConnectionsClient) List(ctx context.Context, resourceGroupName string, workspaceName string) (result PrivateEndpointConnectionListPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.List")
defer func() {
sc := -1
if result.pecl.Response.Response != nil {
sc = result.pecl.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: client.SubscriptionID,
Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("synapse.PrivateEndpointConnectionsClient", "List", err.Error())
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName, workspaceName)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.pecl.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "List", resp, "Failure sending request")
return
}
result.pecl, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "List", resp, "Failure responding to request")
return
}
if result.pecl.hasNextLink() && result.pecl.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListPreparer prepares the List request.
func (client PrivateEndpointConnectionsClient) ListPreparer(ctx context.Context, resourceGroupName string, workspaceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"workspaceName": autorest.Encode("path", workspaceName),
}
const APIVersion = "2019-06-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client PrivateEndpointConnectionsClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client PrivateEndpointConnectionsClient) ListResponder(resp *http.Response) (result PrivateEndpointConnectionList, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client PrivateEndpointConnectionsClient) listNextResults(ctx context.Context, lastResults PrivateEndpointConnectionList) (result PrivateEndpointConnectionList, err error) {
req, err := lastResults.privateEndpointConnectionListPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client PrivateEndpointConnectionsClient) ListComplete(ctx context.Context, resourceGroupName string, workspaceName string) (result PrivateEndpointConnectionListIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx, resourceGroupName, workspaceName)
return
}
| {
err = autorest.NewErrorWithError(err, "synapse.PrivateEndpointConnectionsClient", "Delete", nil, "Failure preparing request")
return
} |
tagger.py |
"""
# BEGIN TAG_DEMO
>>> tag('br') # <1>
'<br />'
>>> tag('p', 'hello') # <2>
'<p>hello</p>'
>>> print(tag('p', 'hello', 'world'))
<p>hello</p>
<p>world</p>
>>> tag('p', 'hello', id=33) # <3>
'<p id="33">hello</p>'
>>> print(tag('p', 'hello', 'world', cls='sidebar')) # <4>
<p class="sidebar">hello</p>
<p class="sidebar">world</p>
>>> tag(content='testing', name="img") # <5>
'<img content="testing" />'
>>> my_tag = {'name': 'img', 'title': 'Sunset Boulevard',
... 'src': 'sunset.jpg', 'cls': 'framed'}
>>> tag(**my_tag) # <6>
'<img class="framed" src="sunset.jpg" title="Sunset Boulevard" />'
# END TAG_DEMO
"""
# BEGIN TAG_FUNC
def tag(name, *content, cls=None, **attrs):
|
# END TAG_FUNC
| """Generate one or more HTML tags"""
if cls is not None:
attrs['class'] = cls
if attrs:
attr_str = ''.join(' %s="%s"' % (attr, value)
for attr, value
in sorted(attrs.items()))
else:
attr_str = ''
if content:
return '\n'.join('<%s%s>%s</%s>' %
(name, attr_str, c, name) for c in content)
else:
return '<%s%s />' % (name, attr_str) |
RealName_Auth_from_aidaiwangApp.py | # -*- coding: utf-8 -*-
__author__ = 'aidai_TEC_QA'
# -*- date:'2017/8/1 0001' -*-
def start_to_realnameauth(): | print(u'realname auth') |
|
sprk-footer.component.ts | import { Component, Input } from '@angular/core';
import {
ISprkFooterGlobalLink,
ISprkFooterLocalLinkColumn,
ISprkFooterSocialLink,
ISprkFooterAward,
ISprkFooterBadgeLink,
ISprkDisclaimerText,
ISprkDisclaimerToggle,
} from './sprk-footer.interfaces';
@Component({
selector: 'sprk-footer',
template: `
<div class="sprk-o-Box sprk-o-Box--large sprk-c-Footer">
<footer
[ngClass]="getClasses()"
role="contentinfo"
[attr.data-id]="idString"
>
<div
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--medium sprk-o-Stack--split@m"
>
<div
*ngIf="globalLinks"
class="sprk-o-Stack__item sprk-o-Stack__item--three-tenths@m sprk-o-Stack sprk-o-Stack--misc-b sprk-o-Box sprk-u-prh"
>
<h3
class="sprk-o-Stack__item sprk-b-TypeBodyOne sprk-c-Footer__text"
>
{{ globalHeading }}
</h3>
<div
*ngFor="let item of globalLinks"
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--medium"
>
<div class="sprk-o-Stack__item">
<a
*ngIf="!item.routerLink"
sprkLink
variant="plain"
href="{{ item.href }}"
analyticsString="{{ item.analytics || item.analyticsString }}"
>
<sprk-icon
*ngIf="item.icon"
iconType="{{ item.icon }}"
additionalClasses="{{ item.iconCSS }} sprk-c-Footer__icon"
></sprk-icon>
<span *ngIf="item.icon" class="sprk-u-ScreenReaderText">{{
item.iconScreenReaderText
}}</span>
<img
*ngIf="item.imgSrc"
src="{{ item.imgSrc }}"
alt="{{ item.imgAlt }}"
class="{{ item.imgCSS }}"
/>
</a>
<a
*ngIf="item.routerLink"
sprkLink
variant="plain"
[routerLink]="item.routerLink"
analyticsString="{{ item.analytics || item.analyticsString }}"
>
<sprk-icon
*ngIf="item.icon"
iconType="{{ item.icon }}"
additionalClasses="{{ item.iconCSS }} sprk-c-Footer__icon"
></sprk-icon>
<span *ngIf="item.icon" class="sprk-u-ScreenReaderText">{{
item.iconScreenReaderText
}}</span>
<img
*ngIf="item.imgSrc"
src="{{ item.imgSrc }}"
alt="{{ item.imgAlt }}"
class="{{ item.imgCSS }}"
/>
</a>
</div>
<p
class="sprk-o-Stack__item sprk-b-TypeBodyFour sprk-c-Footer__text"
>
{{ item.text }}
</p>
</div>
</div>
<div
class="sprk-o-Stack__item sprk-o-Stack__item--seven-tenths@m sprk-o-Stack sprk-o-Stack--medium"
>
<div
*ngIf="localLinks"
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--medium sprk-o-Stack--split@m"
>
<div
*ngFor="let item of localLinks"
class="sprk-o-Stack__item sprk-o-Stack__item--third@m sprk-o-Box sprk-u-PaddingRight--a sprk-o-Stack sprk-o-Stack--large"
>
<h3
class="sprk-o-Stack__item sprk-b-TypeBodyOne sprk-c-Footer__text"
>
{{ item.heading }}
</h3>
<ul
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--misc-a sprk-b-List sprk-b-List--bare"
>
<li
class="sprk-o-Stack__item"
*ngFor="let link of item.links"
>
<a
*ngIf="!link.routerLink"
sprkLink
variant="light"
class="sprk-c-Footer__link"
href="{{ link.href }}"
analyticsString="{{
link.analyticsString || link.analytics
}}"
>
{{ link.text }}
</a>
<a
*ngIf="link.routerLink"
sprkLink
variant="light"
class="sprk-c-Footer__link"
[routerLink]="link.routerLink"
analyticsString="{{
link.analyticsString || link.analytics
}}"
>
{{ link.text }}
</a>
</li>
</ul>
</div>
</div>
<div
*ngIf="socialLinks"
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--large sprk-o-Box"
>
<h3
class="sprk-o-Stack__item sprk-b-TypeBodyOne sprk-c-Footer__text"
>
{{ connectHeading }}
</h3>
<div
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--large sprk-o-Stack--split@m"
>
<ul
*ngIf="socialLinks"
class="
sprk-o-Stack__item
sprk-o-Stack__item--flex@m
sprk-o-Stack
sprk-o-Stack--split@xxs
sprk-o-Stack--medium
sprk-b-List
sprk-b-List--bare"
>
<li
*ngFor="let item of socialLinks"
class="sprk-o-Stack__item"
>
<a
*ngIf="!item.routerLink"
sprkLink
variant="plain"
href="{{ item.href }}"
analyticsString="{{
item.analytics || item.analyticsString
}}"
>
<sprk-icon
*ngIf="item.icon"
iconType="{{ item.icon }}"
additionalClasses="{{
item.iconCSS
}} sprk-c-Footer__icon"
></sprk-icon>
<span *ngIf="item.icon" class="sprk-u-ScreenReaderText">{{
item.iconScreenReaderText
}}</span>
</a>
<a
*ngIf="item.routerLink"
sprkLink
variant="plain"
[routerLink]="item.routerLink"
analyticsString="{{
item.analytics || item.analyticsString
}}"
>
<sprk-icon
*ngIf="item.icon"
iconType="{{ item.icon }}"
additionalClasses="{{
item.iconCSS
}} sprk-c-Footer__icon"
></sprk-icon>
<span *ngIf="item.icon" class="sprk-u-ScreenReaderText">{{
item.iconScreenReaderText
}}</span>
</a>
</li>
</ul>
</div>
</div>
</div>
</div>
<span
class="sprk-c-Divider sprk-u-mvn sprk-u-mhm"
data-id="divider-1"
></span>
<div
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--misc-b sprk-o-Box sprk-u-PaddingTop--b"
>
<div
*ngIf="awards"
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--large"
>
<h3
class="sprk-o-Stack__item sprk-b-TypeBodyOne sprk-c-Footer__text"
>
{{ awardsHeading }}
</h3>
<div
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--medium sprk-o-Stack--split@s sprk-u-mbm"
>
<div class="sprk-o-Stack__item" *ngFor="let award of awards">
<a
*ngIf="!award.routerLink"
sprkLink
variant="plain"
class="sprk-o-Stack__item"
href="{{ award.href }}"
analyticsString="{{
award.analytics || award.analyticsString
}}"
>
<img
*ngIf="award.imgSrc"
src="{{ award.imgSrc }}"
alt="{{ award.imgAlt }}"
class="{{ award.imgCSS }}"
/>
</a>
<a
*ngIf="award.routerLink"
sprkLink
variant="plain"
class="sprk-o-Stack__item"
[routerLink]="award.routerLink"
analyticsString="{{
award.analytics || award.analyticsString
}}"
>
<img
*ngIf="award.imgSrc"
src="{{ award.imgSrc }}"
alt="{{ award.imgAlt }}"
class="{{ award.imgCSS }}"
/>
</a>
</div>
</div>
<div
*ngFor="let toggle of disclaimerToggle"
class="sprk-o-Stack__item"
>
<sprk-toggle
title="{{ toggle.title }}"
analyticsString="{{
toggle.analytics || toggle.analyticsString
}}"
iconClass="sprk-c-Footer__icon"
titleFontClass="sprk-b-TypeBodyFour sprk-c-Footer__trigger"
>
<p class="sprk-b-TypeBodyFour sprk-c-Footer__text">
{{ toggle.body }}
</p>
</sprk-toggle>
</div>
</div>
<div
*ngIf="disclaimerText"
class="sprk-o-Stack__item sprk-o-Stack sprk-o-Stack--large"
>
<p
*ngFor="let disclaimer of disclaimerText"
class="sprk-o-Stack__item sprk-b-TypeBodyFour sprk-c-Footer__text"
>
{{ disclaimer.text }}
</p>
<ng-content select="[additional-disclaimer-slot]"></ng-content>
</div>
<ul
*ngIf="badgeLinks"
class="
sprk-o-Stack__item
sprk-o-Stack__item--flex@m
sprk-o-Stack
sprk-o-Stack--split@xxs
sprk-o-Stack--medium
sprk-b-List
sprk-b-List--bare"
>
<li *ngFor="let item of badgeLinks" class="sprk-o-Stack__item">
<a
*ngIf="!item.routerLink"
sprkLink
variant="plain"
href="{{ item.href }}"
analyticsString="{{ item.analytics || item.analyticsString }}"
>
<sprk-icon
*ngIf="item.icon"
iconType="{{ item.icon }}"
additionalClasses="{{ item.iconCSS }} sprk-c-Footer__icon"
></sprk-icon>
<span *ngIf="item.icon" class="sprk-u-ScreenReaderText">{{
item.iconScreenReaderText
}}</span>
</a>
<a
*ngIf="item.routerLink"
sprkLink
variant="plain"
[routerLink]="item.routerLink"
analyticsString="{{ item.analytics || item.analyticsString }}"
>
<sprk-icon
*ngIf="item.icon"
iconType="{{ item.icon }}"
additionalClasses="{{ item.iconCSS }} sprk-c-Footer__icon"
></sprk-icon>
<span *ngIf="item.icon" class="sprk-u-ScreenReaderText">{{
item.iconScreenReaderText
}}</span>
</a>
</li>
</ul>
</div>
</footer>
</div>
`,
})
export class SprkFooterComponent {
/**
* Expects a space separated string
* of classes to be added to the
* component.
*/
@Input()
additionalClasses: string;
/**
* The value supplied will be assigned
* to the `data-id` attribute on the | * per page.
*/
@Input()
idString: string;
/**
* The heading for the "Global" section.
*/
@Input()
globalHeading: string;
/**
* The heading for the "Awards" section.
*/
@Input()
awardsHeading: string;
/**
* The heading for the "Connect With Us" section.
*/
@Input()
connectHeading: string;
/**
* Array of
* [ISprkFooterGlobalLink](https://github.com/sparkdesignsystem/spark-design-system/blob/main/angular/projects/spark-angular/src/lib/components/sprk-footer/sprk-footer.interfaces.ts)
* used to build the
* links in the "Global" section.
*/
@Input()
globalLinks: ISprkFooterGlobalLink[];
/**
* Array of
* [ISprkFooterLocalLinkColumn](https://github.com/sparkdesignsystem/spark-design-system/blob/main/angular/projects/spark-angular/src/lib/components/sprk-footer/sprk-footer.interfaces.ts)
* used to build
* the columns of links
* in the "Site Links" section.
*/
@Input()
localLinks: ISprkFooterLocalLinkColumn[];
/**
* Array of
* [ISprkFooterSocialLink](https://github.com/sparkdesignsystem/spark-design-system/blob/main/angular/projects/spark-angular/src/lib/components/sprk-footer/sprk-footer.interfaces.ts)
* used to build the
* icon links in the "Connect With Us section".
*/
@Input()
socialLinks: ISprkFooterSocialLink[];
/**
* Array of
* [ISprkFooterAward](https://github.com/sparkdesignsystem/spark-design-system/blob/main/angular/projects/spark-angular/src/lib/components/sprk-footer/sprk-footer.interfaces.ts)
* used to build the
* awards in the "Awards" section.
*/
@Input()
awards: ISprkFooterAward[];
/**
* Array of
* [ISprkFooterBadgeLink](https://github.com/sparkdesignsystem/spark-design-system/blob/main/angular/projects/spark-angular/src/lib/components/sprk-footer/sprk-footer.interfaces.ts)
* used to build the
* icon links in the icon
* section in the bottom
* of the Footer.
*/
@Input()
badgeLinks: ISprkFooterBadgeLink[];
/**
* Array of
* [ISprkDisclaimerText](https://github.com/sparkdesignsystem/spark-design-system/blob/main/angular/projects/spark-angular/src/lib/components/sprk-footer/sprk-footer.interfaces.ts)
   * used to build disclaimer text
   * in the bottom of the Footer.
*/
@Input()
disclaimerText: ISprkDisclaimerText[];
/**
* Array of
* [ISprkDisclaimerToggle](https://github.com/sparkdesignsystem/spark-design-system/blob/main/angular/projects/spark-angular/src/lib/components/sprk-footer/sprk-footer.interfaces.ts)
* used to build
   * the disclaimer toggle(s) in
   * the Footer.
*/
@Input()
disclaimerToggle: ISprkDisclaimerToggle[];
/**
* @ignore
*/
getClasses(): string {
const classArray: string[] = [
'sprk-o-CenteredColumn sprk-o-Stack sprk-o-Stack--misc-b sprk-c-Footer',
];
if (this.additionalClasses) {
this.additionalClasses.split(' ').forEach((className) => {
classArray.push(className);
});
}
return classArray.join(' ');
}
} | * component. This is intended to be
* used as a selector for automated
* tools. This value should be unique |
interfaces.ts | import {Decimal} from 'decimal.js';
export enum ResultState {
NONE = 'NONE',
REGULAR = 'REGULAR',
NOLIFT = 'NOLIFT',
}
export interface CashFlowEntry {
amount: Decimal;
count: Decimal;
flowNumber: number;
}
export interface ProgramWord {
arg1: number;
arg2?: number;
arg3?: number;
}
export interface State {
mDotDY: boolean;
wasG: boolean;
wasF: boolean;
hasInput: boolean;
wasResult: ResultState;
wasSto: boolean;
stoOp: ActionType;
backspace: boolean;
backspaceStates: State[];
wasRcl: boolean;
wasGto: boolean;
inputChars: string;
fPrecision: number;
error: digit | null;
programMode: boolean;
programMemory: ProgramWord[];
programEditCounter: number;
programCounter: number;
programRunning: boolean;
gtoScratch: number[];
eexValue: EEXData;
displaySpecial: string;
dec: Decimal;
N: Decimal;
PV: Decimal;
I: Decimal;
FV: Decimal;
PMT: Decimal;
x: Decimal;
xInpPrec: number;
lastX: Decimal;
y: Decimal;
z: Decimal;
t: Decimal;
begEnd: Decimal;
registers: Decimal[];
cashFlowCounts: Decimal[];
compoundInterest: boolean;
}
export type StateKey =
| 'mDotDY'
| 'wasG'
| 'wasF'
| 'hasInput'
| 'wasResult'
| 'wasSto'
| 'stoOp'
| 'backspace'
| 'backspaceStates'
| 'wasRcl'
| 'dec'
| 'N'
| 'PV'
| 'I'
| 'FV'
| 'PMT'
| 'x'
| 'y'
| 'z'
| 't'
| 'begEnd'
| 'registers'
| 'cashFlowCounts';
export type digit = 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9;
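// Scratch state for EEX (enter-exponent) entry: the x value before EEX was
// pressed, plus the exponent digits and sign typed so far.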
export interface EEXData {
origX: Decimal;
exponent: number;
positive: boolean;
}
export interface StateUpdate {
mDotDY?: boolean;
wasG?: boolean;
wasF?: boolean;
hasInput?: boolean;
wasResult?: ResultState;
wasSto?: boolean;
stoOp?: ActionType;
backspace?: boolean;
backspaceStates?: State[];
wasRcl?: boolean;
inputChars?: string;
fPrecision?: number;
error?: digit | null;
eexValue?: EEXData;
cashFlowCounts?: Decimal[];
simpleInterest?: boolean;
programRunning?: boolean;
programMode?: boolean;
programMemory?: ProgramWord[];
programCounter?: number;
programEditCounter?: number;
gtoScratch?: number[];
dec?: Decimal;
N?: Decimal;
PV?: Decimal;
I?: Decimal;
FV?: Decimal;
PMT?: Decimal;
x?: Decimal;
xInpPrec?: number;
lastX?: Decimal;
y?: Decimal;
z?: Decimal;
t?: Decimal;
begEnd?: Decimal;
registers?: Decimal[];
displaySpecial?: string;
cashFlows?: CashFlowEntry[];
}
export type ActionType =
// internal actions
| 'setState'
| 'noop'
| 'gto'
// button actions
| '.'
| '+'
| 'Enter'
| '-'
| 'clx'
| 'EEX'
| 'singleStep'
| 'runStop'
| 'FV'
| 'PV'
| 'PMT'
| 'I'
| 'N'
| 'rcl'
| 'sto'
| 'swapxy'
| 'f'
| 'g'
| 'rotateStack'
| 'recipX'
| 'chs'
| 'sigmaPlus'
| 'percent'
| 'percentTotal'
| 'percentChange'
| 'ytox'
| 'div'
| 'times'
| 0
| 1
| 2
| 3
| 4
| 5
| 6
| 7
| 8
| 9;
export interface Action {
type: ActionType;
value?: State; // for setState
fromRunner?: boolean; // to denote action is being delivered by the program runner, rather than the keyboard
gtoTarget?: number;
}
export interface StatsRegisterBundle {
n: Decimal;
sumX: Decimal;
sumX2: Decimal;
sumY: Decimal;
sumY2: Decimal;
sumXY: Decimal;
}
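// Gathers the calculator's fixed statistics registers (R1–R6: n, Σx, Σx²,
// Σy, Σy², Σxy) into a named bundle so statistics routines can address them
// by field name rather than by raw register index.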
export function makeRegisterBundle(state: State): StatsRegisterBundle {
return {
n: state.registers[1],
sumX: state.registers[2],
sumX2: state.registers[3],
sumY: state.registers[4],
sumY2: state.registers[5],
sumXY: state.registers[6],
};
}
lib.rs | #![deny(
    unstable_features,
    unused_must_use,
    unused_mut,
    unused_imports,
unused_import_braces
)]
#[macro_use]
extern crate error_chain;
mod client;
pub mod errors;
mod util;
pub mod model;
pub mod account;
pub mod api;
pub mod general;
pub mod market;
pub mod userstream;
pub mod websockets;
pub mod futures;
test_reactive.py | import unittest.mock
from functools import partial
import bokeh.core.properties as bp
import param
import pytest
from bokeh.document import Document
from bokeh.io.doc import patch_curdoc
from bokeh.models import Div
from panel.layout import Tabs, WidgetBox
from panel.reactive import Reactive, ReactiveHTML
from panel.viewable import Viewable
from panel.widgets import (
Checkbox, IntInput, StaticText, TextInput,
)
def test_reactive_default_title():
doc = ReactiveHTML().server_doc()
assert doc.title == 'Panel Application'
def test_reactive_servable_title():
doc = Document()
session_context = unittest.mock.Mock()
with patch_curdoc(doc):
doc._session_context = lambda: session_context
ReactiveHTML().servable(title='A')
ReactiveHTML().servable(title='B')
assert doc.title == 'B'
def test_link():
"Link two Reactive objects"
    class ReactiveLink(Reactive):
a = param.Parameter()
obj = ReactiveLink()
obj2 = ReactiveLink()
obj.link(obj2, a='a')
obj.a = 1
assert obj.a == 1
assert obj2.a == 1
def test_param_rename():
"Test that Reactive renames params and properties"
class ReactiveRename(Reactive):
a = param.Parameter()
_rename = {'a': 'b'}
obj = ReactiveRename()
params = obj._process_property_change({'b': 1})
assert params == {'a': 1}
properties = obj._process_param_change({'a': 1})
assert properties == {'b': 1}
def test_link_properties_nb(document, comm):
class ReactiveLink(Reactive):
text = param.String(default='A')
obj = ReactiveLink()
div = Div()
# Link property and check bokeh js property callback is defined
obj._link_props(div, ['text'], document, div, comm)
assert 'text' in div._callbacks
# Assert callback is set up correctly
cb = div._callbacks['text'][0]
assert isinstance(cb, partial)
assert cb.args == (document, div.ref['id'], comm, None)
assert cb.func == obj._comm_change
def test_link_properties_server(document):
class ReactiveLink(Reactive):
text = param.String(default='A')
obj = ReactiveLink()
div = Div()
# Link property and check bokeh callback is defined
obj._link_props(div, ['text'], document, div)
assert 'text' in div._callbacks
# Assert callback is set up correctly
cb = div._callbacks['text'][0]
assert isinstance(cb, partial)
assert cb.args == (document, div.ref['id'], None)
assert cb.func == obj._server_change
def test_text_input_controls():
text_input = TextInput()
controls = text_input.controls()
assert isinstance(controls, Tabs)
assert len(controls) == 2
wb1, wb2 = controls
assert isinstance(wb1, WidgetBox)
assert len(wb1) == 6
name, disabled, *(ws) = wb1
assert isinstance(name, StaticText)
assert isinstance(disabled, Checkbox)
not_checked = []
for w in ws:
if w.name == 'Value':
assert isinstance(w, TextInput)
text_input.value = "New value"
assert w.value == "New value"
elif w.name == 'Value input':
assert isinstance(w, TextInput)
elif w.name == 'Placeholder':
assert isinstance(w, TextInput)
text_input.placeholder = "Test placeholder..."
assert w.value == "Test placeholder..."
elif w.name == 'Max length':
assert isinstance(w, IntInput)
else:
not_checked.append(w)
assert not not_checked
assert isinstance(wb2, WidgetBox)
assert len(wb2) == len(list(Viewable.param)) + 1
def test_text_input_controls_explicit():
text_input = TextInput()
controls = text_input.controls(['placeholder', 'disabled'])
assert isinstance(controls, WidgetBox)
assert len(controls) == 3
name, disabled, placeholder = controls
assert isinstance(name, StaticText)
assert isinstance(disabled, Checkbox)
assert isinstance(placeholder, TextInput)
text_input.disabled = True
assert disabled.value
text_input.placeholder = "Test placeholder..."
assert placeholder.value == "Test placeholder..."
def test_reactive_html_basic():
class Test(ReactiveHTML):
int = param.Integer(default=3, doc='An integer')
float = param.Number(default=3.14, doc='A float')
_template = '<div id="div" width=${int}></div>'
data_model = Test._data_model
assert data_model.__name__ == 'Test1'
properties = data_model.properties()
assert 'int' in properties
assert 'float' in properties
int_prop = data_model.lookup('int')
assert isinstance(int_prop.property, bp.Int)
assert int_prop.class_default(data_model) == 3
float_prop = data_model.lookup('float')
assert isinstance(float_prop.property, bp.Float)
assert float_prop.class_default(data_model) == 3.14
assert Test._node_callbacks == {}
test = Test()
root = test.get_root()
assert test._attrs == {'div': [('width', ['int'], '{int}')]}
assert root.callbacks == {}
assert root.events == {}
def test_reactive_html_no_id_param_error():
with pytest.raises(ValueError) as excinfo:
class Test(ReactiveHTML):
width = param.Number(default=200)
_template = '<div width=${width}></div>'
assert "Found <div> node with the `width` attribute referencing the `width` parameter." in str(excinfo.value)
def test_reactive_html_no_id_method_error():
with pytest.raises(ValueError) as excinfo:
class Test(ReactiveHTML):
_template = '<div onclick=${_onclick}></div>'
def _onclick(self):
pass
assert "Found <div> node with the `onclick` callback referencing the `_onclick` method." in str(excinfo.value)
def test_reactive_html_dom_events():
class TestDOMEvents(ReactiveHTML):
int = param.Integer(default=3, doc='An integer')
float = param.Number(default=3.14, doc='A float')
_template = '<div id="div" width=${int}></div>'
_dom_events = {'div': ['change']}
data_model = TestDOMEvents._data_model
assert data_model.__name__ == 'TestDOMEvents1'
properties = data_model.properties()
assert 'int' in properties
assert 'float' in properties
int_prop = data_model.lookup('int')
assert isinstance(int_prop.property, bp.Int)
assert int_prop.class_default(data_model) == 3
float_prop = data_model.lookup('float')
assert isinstance(float_prop.property, bp.Float)
assert float_prop.class_default(data_model) == 3.14
assert TestDOMEvents._node_callbacks == {}
test = TestDOMEvents()
root = test.get_root()
assert test._attrs == {'div': [('width', ['int'], '{int}')]}
assert root.callbacks == {}
assert root.events == {'div': {'change': True}}
def test_reactive_html_inline():
class TestInline(ReactiveHTML):
int = param.Integer(default=3, doc='An integer')
_template = '<div id="div" onchange=${_div_change} width=${int}></div>'
def _div_change(self, event):
pass
data_model = TestInline._data_model
assert data_model.__name__ == 'TestInline1'
properties = data_model.properties()
assert 'int' in properties
int_prop = data_model.lookup('int')
assert isinstance(int_prop.property, bp.Int)
assert int_prop.class_default(data_model) == 3
assert TestInline._node_callbacks == {'div': [('onchange', '_div_change')]}
assert TestInline._inline_callbacks == [('div', 'onchange', '_div_change')]
test = TestInline()
root = test.get_root()
assert test._attrs == {
'div': [
('onchange', [], '{_div_change}'),
('width', ['int'], '{int}')
]
}
assert root.callbacks == {'div': [('onchange', '_div_change')]}
assert root.events == {}
test.on_event('div', 'click', print)
assert root.events == {'div': {'click': False}}
def test_reactive_html_children():
class TestChildren(ReactiveHTML):
children = param.List(default=[])
_template = '<div id="div">${children}</div>'
assert TestChildren._node_callbacks == {}
assert TestChildren._inline_callbacks == []
assert TestChildren._parser.children == {'div': 'children'}
widget = TextInput()
test = TestChildren(children=[widget])
root = test.get_root()
assert test._attrs == {}
assert root.children == {'div': [widget._models[root.ref['id']][0]]}
assert len(widget._models) == 1
assert test._panes == {'children': [widget]}
widget_new = TextInput()
test.children = [widget_new]
assert len(widget._models) == 0
assert root.children == {'div': [widget_new._models[root.ref['id']][0]]}
assert test._panes == {'children': [widget_new]}
test._cleanup(root)
assert len(test._models) == 0
assert len(widget_new._models) == 0
def test_reactive_html_templated_children():
class TestTemplatedChildren(ReactiveHTML):
children = param.List(default=[])
_template = """
<select id="select">
{% for option in children %}
<option id="option-{{ loop.index0 }}">${children[{{ loop.index0 }}]}</option>
{% endfor %}
        </select>
"""
assert TestTemplatedChildren._node_callbacks == {}
assert TestTemplatedChildren._inline_callbacks == []
assert TestTemplatedChildren._parser.children == {'option': 'children'}
widget = TextInput()
test = TestTemplatedChildren(children=[widget])
root = test.get_root()
assert test._attrs == {}
assert root.looped == ['option']
assert root.children == {'option': [widget._models[root.ref['id']][0]]}
assert test._panes == {'children': [widget]}
widget_new = TextInput()
test.children = [widget_new]
assert len(widget._models) == 0
assert root.children == {'option': [widget_new._models[root.ref['id']][0]]}
assert test._panes == {'children': [widget_new]}
def test_reactive_html_templated_dict_children():
class TestTemplatedChildren(ReactiveHTML):
children = param.Dict(default={})
_template = """
<select id="select">
{% for key, option in children.items() %}
<option id="option-{{ loop.index0 }}">${children[{{ key }}]}</option>
{% endfor %}
        </select>
"""
assert TestTemplatedChildren._node_callbacks == {}
assert TestTemplatedChildren._inline_callbacks == []
assert TestTemplatedChildren._parser.children == {'option': 'children'}
widget = TextInput()
test = TestTemplatedChildren(children={'test': widget})
root = test.get_root()
assert test._attrs == {}
assert root.looped == ['option']
assert root.children == {'option': [widget._models[root.ref['id']][0]]}
assert test._panes == {'children': [widget]}
widget_model = widget._models[root.ref['id']][0]
widget_new = TextInput()
test.children = {'test': widget_new, 'test2': widget}
assert len(widget._models) == 1
assert root.children == {
'option': [
widget_new._models[root.ref['id']][0],
widget_model
]
}
assert test._panes == {'children': [widget_new, widget]}
def test_reactive_html_templated_children_add_loop_id():
class TestTemplatedChildren(ReactiveHTML):
children = param.List(default=[])
_template = """
<select id="select">
{%- for option in children %}
<option id="option">${children[{{ loop.index0 }}]}</option>
{%- endfor %}
</select>
"""
assert TestTemplatedChildren._node_callbacks == {}
assert TestTemplatedChildren._inline_callbacks == []
assert TestTemplatedChildren._parser.children == {'option': 'children'}
test = TestTemplatedChildren(children=['A', 'B', 'C'])
assert test._get_template()[0] == """
<select id="select-${id}">
<option id="option-0-${id}"></option>
<option id="option-1-${id}"></option>
<option id="option-2-${id}"></option>
</select>
"""
model = test.get_root()
assert test._attrs == {}
assert model.looped == ['option']
def test_reactive_html_templated_children_add_loop_id_and_for_loop_var():
class TestTemplatedChildren(ReactiveHTML):
children = param.List(default=[])
_template = """
<select id="select">
{%- for option in children %}
<option id="option">${option}</option>
{%- endfor %}
</select>
"""
assert TestTemplatedChildren._node_callbacks == {}
assert TestTemplatedChildren._inline_callbacks == []
assert TestTemplatedChildren._parser.children == {'option': 'children'}
test = TestTemplatedChildren(children=['A', 'B', 'C'])
assert test._get_template()[0] == """
<select id="select-${id}">
<option id="option-0-${id}"></option>
<option id="option-1-${id}"></option>
<option id="option-2-${id}"></option>
</select>
"""
model = test.get_root()
assert test._attrs == {}
assert model.looped == ['option']
@pytest.mark.parametrize('operator', ['', '+', '-', '*', '/', '%', '**', '>>', '<<', '>>>', '&', '^', '&&', '||', '??'])
@pytest.mark.parametrize('sep', [' ', ''])
def test_reactive_html_scripts_linked_properties_assignment_operator(operator, sep):
class TestScripts(ReactiveHTML):
clicks = param.Integer()
_template = "<div id='test'></div>"
_scripts = {'render': f'test.onclick = () => {{ data.clicks{sep}{operator}= 1 }}'}
assert TestScripts()._linked_properties() == ['clicks']
serializers.py | from django.contrib.auth import get_user_model
from rest_framework import serializers
from companys.models import Company, News
from users.models import Profile
User = get_user_model()
class NewsSerializer(serializers.ModelSerializer):
class Meta:
model = News
fields = '__all__'
class CompanySerializer(serializers.ModelSerializer):
company_news = NewsSerializer(many=True, required=False)
class Meta:
model = Company
exclude = ['id']
class CompanySerializerNotAuth(serializers.ModelSerializer):
class Meta:
        model = Company
        exclude = ['id', 'company_news']
class ProfileSerializer(serializers.ModelSerializer):
company = serializers.StringRelatedField()
class Meta:
model = Profile
exclude = ['user']
class UserSerializer(serializers.ModelSerializer):
profile = ProfileSerializer()
class Meta:
model = User
fields = ['id', 'profile', 'username', 'first_name', 'last_name', 'date_joined']
def create(self, validated_data):
profile_data = validated_data.pop('profile')
user = User.objects.create(**validated_data)
Profile.objects.create(user=user, **profile_data)
return user
def update(self, instance, validated_data):
profile_data = validated_data.pop('profile')
profile = instance.profile
# * User Info
instance.first_name = validated_data.get(
'first_name', instance.first_name)
instance.last_name = validated_data.get(
'last_name', instance.last_name)
# * AccountProfile Info
profile.company = profile_data.get(
'company', profile.company)
profile.bio = profile_data.get(
'bio', profile.bio)
profile.location = profile_data.get(
'location', profile.location)
profile.birth_date = profile_data.get(
'birth_date', profile.birth_date)
profile.role = profile_data.get(
'role', profile.role)
profile.save()
return instance | |
7d56176e.b687cd59.js | "use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[6042],{4137:function(e,t,n){n.d(t,{Zo:function(){return u},kt:function(){return d}});var a=n(7294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t<arguments.length;t++){var n=null!=arguments[t]?arguments[t]:{};t%2?o(Object(n),!0).forEach((function(t){r(e,t,n[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(n)):o(Object(n)).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(n,t))}))}return e}function l(e,t){if(null==e)return{};var n,a,r=function(e,t){if(null==e)return{};var n,a,r={},o=Object.keys(e);for(a=0;a<o.length;a++)n=o[a],t.indexOf(n)>=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a<o.length;a++)n=o[a],t.indexOf(n)>=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var c=a.createContext({}),s=function(e){var t=a.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},u=function(e){var t=s(e.components);return a.createElement(c.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},m=a.forwardRef((function(e,t){var n=e.components,r=e.mdxType,o=e.originalType,c=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),m=s(n),d=r,h=m["".concat(c,".").concat(d)]||m[d]||p[d]||o;return n?a.createElement(h,i(i({ref:t},u),{},{components:n})):a.createElement(h,i({ref:t},u))}));function d(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=n.length,i=new Array(o);i[0]=m;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l.mdxType="string"==typeof e?e:r,i[1]=l;for(var s=2;s<o;s++)i[s]=n[s];return a.createElement.apply(null,i)}return a.createElement.apply(null,n)}m.displayName="MDXCreateElement"},8448:function(e,t,n){var a=n(7294);t.Z=function(e){var t=e.children,n=e.hidden,r=e.className;return a.createElement("div",{role:"tabpanel",hidden:n,className:r},t)}},7225:function(e,t,n){n.d(t,{Z:function(){return m}});var a=n(7462),r=n(7294),o=n(1048),i=n(2713);var l=function(){var e=(0,r.useContext)(i.Z);if(null==e)throw new Error('"useUserPreferencesContext" is used outside of "Layout" component.');return e},c=n(3309),s=n(6010),u="tabItem_vU9c";function p(e){var t,n,a,o=e.lazy,i=e.block,p=e.defaultValue,m=e.values,d=e.groupId,h=e.className,v=r.Children.map(e.children,(function(e){if((0,r.isValidElement)(e)&&void 0!==e.props.value)return e;throw new Error("Docusaurus error: Bad <Tabs> child <"+("string"==typeof e.type?e.type:e.type.name)+'>: all children of the <Tabs> component should be <TabItem>, and every <TabItem> should have a unique "value" prop.')})),b=null!=m?m:v.map((function(e){var t=e.props;return{value:t.value,label:t.label}})),f=(0,c.lx)(b,(function(e,t){return e.value===t.value}));if(f.length>0)throw new Error('Docusaurus error: Duplicate values "'+f.map((function(e){return e.value})).join(", ")+'" found in <Tabs>. 
Every value needs to be unique.');var y=null===p?p:null!=(t=null!=p?p:null==(n=v.find((function(e){return e.props.default})))?void 0:n.props.value)?t:null==(a=v[0])?void 0:a.props.value;if(null!==y&&!b.some((function(e){return e.value===y})))throw new Error('Docusaurus error: The <Tabs> has a defaultValue "'+y+'" but none of its children has the corresponding value. Available values are: '+b.map((function(e){return e.value})).join(", ")+". If you intend to show no default tab, use defaultValue={null} instead.");var w=l(),g=w.tabGroupChoices,k=w.setTabGroupChoices,x=(0,r.useState)(y),N=x[0],A=x[1],T=[],O=(0,c.o5)().blockElementScrollPositionUntilNextRender;if(null!=d){var E=g[d];null!=E&&E!==N&&b.some((function(e){return e.value===E}))&&A(E)}var j=function(e){var t=e.currentTarget,n=T.indexOf(t),a=b[n].value;a!==N&&(O(t),A(a),null!=d&&k(d,a))},C=function(e){var t,n=null;switch(e.key){case"ArrowRight":var a=T.indexOf(e.currentTarget)+1;n=T[a]||T[0];break;case"ArrowLeft":var r=T.indexOf(e.currentTarget)-1;n=T[r]||T[T.length-1]}null==(t=n)||t.focus()};return r.createElement("div",{className:"tabs-container"},r.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,s.Z)("tabs",{"tabs--block":i},h)},b.map((function(e){var t=e.value,n=e.label;return r.createElement("li",{role:"tab",tabIndex:N===t?0:-1,"aria-selected":N===t,className:(0,s.Z)("tabs__item",u,{"tabs__item--active":N===t}),key:t,ref:function(e){return T.push(e)},onKeyDown:C,onFocus:j,onClick:j},null!=n?n:t)}))),o?(0,r.cloneElement)(v.filter((function(e){return e.props.value===N}))[0],{className:"margin-vert--md"}):r.createElement("div",{className:"margin-vert--md"},v.map((function(e,t){return(0,r.cloneElement)(e,{key:t,hidden:e.props.value!==N})}))))}function m(e){var t=(0,o.Z)();return r.createElement(p,(0,a.Z)({key:String(t)},e))}},2713:function(e,t,n){var a=(0,n(7294).createContext)(void 0);t.Z=a},8172:function(e,t,n){n.r(t),n.d(t,{frontMatter:function(){return s},contentTitle:function(){return u},metadata:function(){return p},toc:function(){return m},default:function(){return h}});var a=n(7462),r=n(3366),o=(n(7294),n(4137)),i=n(7225),l=n(8448),c=["components"],s={id:"touchAction",title:"touchAction",custom_edit_url:"https://github.com/Abhi6722/hackers-hub/edit/main/packages/webdriverio/src/commands/browser/touchAction.ts"},u=void 0,p={unversionedId:"api/browser/touchAction",id:"api/browser/touchAction",isDocsHomePage:!1,title:"touchAction",description:"The Touch Action API provides the basis of all gestures that can be automated in Appium.",source:"@site/docs/api/browser/_touchAction.md",sourceDirName:"api/browser",slug:"/api/browser/touchAction",permalink:"/docs/api/browser/touchAction",editUrl:"https://github.com/Abhi6722/hackers-hub/edit/main/packages/webdriverio/src/commands/browser/touchAction.ts",tags:[],version:"current",frontMatter:{id:"touchAction",title:"touchAction",custom_edit_url:"https://github.com/Abhi6722/hackers-hub/edit/main/packages/webdriverio/src/commands/browser/touchAction.ts"},sidebar:"api",previous:{title:"throttle",permalink:"/docs/api/browser/throttle"},next:{title:"uploadFile",permalink:"/docs/api/browser/uploadFile"}},m=[{value:"Usage",id:"usage",children:[],level:5},{value:"Parameters",id:"parameters",children:[],level:5},{value:"Example",id:"example",children:[],level:5}],d={toc:m};function h(e){var t=e.components,n=(0,r.Z)(e,c);return(0,o.kt)("wrapper",(0,a.Z)({},d,n,{components:t,mdxType:"MDXLayout"}),(0,o.kt)("p",null,"The Touch Action API provides the basis of all 
gestures that can be automated in Appium.\nIt is currently only available to native apps and can not be used to interact with webapps.\nAt its core is the ability to chain together ",(0,o.kt)("em",{parentName:"p"},"ad hoc")," individual actions, which will then be\napplied to an element in the application on the device. The basic actions that can be used are:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"press (pass element or (",(0,o.kt)("inlineCode",{parentName:"li"},"x"),", ",(0,o.kt)("inlineCode",{parentName:"li"},"y"),") or both)"),(0,o.kt)("li",{parentName:"ul"},"longPress (pass element or (",(0,o.kt)("inlineCode",{parentName:"li"},"x"),", ",(0,o.kt)("inlineCode",{parentName:"li"},"y"),") or both)"),(0,o.kt)("li",{parentName:"ul"},"tap (pass element or (",(0,o.kt)("inlineCode",{parentName:"li"},"x"),", ",(0,o.kt)("inlineCode",{parentName:"li"},"y"),") or both)"),(0,o.kt)("li",{parentName:"ul"},"moveTo (pass absolute ",(0,o.kt)("inlineCode",{parentName:"li"},"x"),", ",(0,o.kt)("inlineCode",{parentName:"li"},"y")," coordinates)"),(0,o.kt)("li",{parentName:"ul"},"wait (pass ",(0,o.kt)("inlineCode",{parentName:"li"},"ms")," (as milliseconds))"),(0,o.kt)("li",{parentName:"ul"},"release (no arguments)")),(0,o.kt)("h5",{id:"usage"},"Usage"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-js"},"browser.touchAction(action)\n")),(0,o.kt)("h5",{id:"parameters"},"Parameters"),(0,o.kt)("table",null,(0,o.kt)("thead",{parentName:"table"},(0,o.kt)("tr",{parentName:"thead"},(0,o.kt)("th",{parentName:"tr",align:null},"Name"),(0,o.kt)("th",{parentName:"tr",align:null},"Type"),(0,o.kt)("th",{parentName:"tr",align:null},"Details"))),(0,o.kt)("tbody",{parentName:"table"},(0,o.kt)("tr",{parentName:"tbody"},(0,o.kt)("td",{parentName:"tr",align:null},(0,o.kt)("code",null,(0,o.kt)("var",null,"action"))),(0,o.kt)("td",{parentName:"tr",align:null},(0,o.kt)("code",null,"TouchActions")),(0,o.kt)("td",{parentName:"tr",align:null},"action to execute")))),(0,o.kt)("h5",{id:"example"},"Example"),(0,o.kt)(i.Z,{defaultValue:"async",className:"runtime",values:[{label:"Asynchronous Mode",value:"async"},{label:"Synchronous Mode",value:"sync"}],mdxType:"Tabs"},(0,o.kt)(l.Z,{value:"async",mdxType:"TabItem"},(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-js",metastring:'title="touchAction.js"',title:'"touchAction.js"'},"it('should do a touch gesture', async () => {\n const screen = await $('//UITextbox');\n\n // simple touch action on element\n await browser.touchAction({\n action: 'tap',\n element: screen\n });\n\n // simple touch action x y variables\n // tap location is 30px right and 20px down relative from the viewport\n await browser.touchAction({\n action: 'tap',\n x: 30,\n y:20\n })\n\n // simple touch action x y variables\n // tap location is 30px right and 20px down relative from the center of the element\n await browser.touchAction({\n action: 'tap',\n x: 30,\n y:20,\n element: screen\n })\n\n // multi action on an element\n // drag&drop from position 200x200 down 100px on the screen\n await browser.touchAction([\n { action: 'press', x: 200, y: 200 },\n { action: 'moveTo', x: 200, y: 300 },\n 'release'\n ])\n});\n"))),(0,o.kt)(l.Z,{value:"sync",mdxType:"TabItem"},(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-js",metastring:'title="touchAction.js"',title:'"touchAction.js"'},"it('should do a touch gesture', () => {\n const screen = $('//UITextbox');\n\n // simple touch action on element\n browser.touchAction({\n action: 'tap',\n 
element: screen\n });\n\n // simple touch action x y variables\n // tap location is 30px right and 20px down relative from the viewport\n browser.touchAction({\n action: 'tap',\n x: 30,\n y:20\n })\n\n // simple touch action x y variables\n // tap location is 30px right and 20px down relative from the center of the element\n browser.touchAction({\n action: 'tap',\n x: 30,\n y:20,\n element: screen\n })\n\n // multi action on an element\n // drag&drop from position 200x200 down 100px on the screen\n browser.touchAction([\n { action: 'press', x: 200, y: 200 },\n { action: 'moveTo', x: 200, y: 300 },\n 'release'\n ])\n});\n")),(0,o.kt)("div",{className:"admonition admonition-caution alert alert--warning"},(0,o.kt)("div",{parentName:"div",className:"admonition-heading"},(0,o.kt)("h5",{parentName:"div"},(0,o.kt)("span",{parentName:"h5",className:"admonition-icon"},(0,o.kt)("svg",{parentName:"span",xmlns:"http://www.w3.org/2000/svg",width:"16",height:"16",viewBox:"0 0 16 16"},(0,o.kt)("path",{parentName:"svg",fillRule:"evenodd",d:"M8.893 1.5c-.183-.31-.52-.5-.887-.5s-.703.19-.886.5L.138 13.499a.98.98 0 0 0 0 1.001c.193.31.53.501.886.501h13.964c.367 0 .704-.19.877-.5a1.03 1.03 0 0 0 .01-1.002L8.893 1.5zm.133 11.497H6.987v-2.003h2.039v2.003zm0-3.004H6.987V5.987h2.039v4.006z"}))),"caution")),(0,o.kt)("div",{parentName:"div",className:"admonition-content"},(0,o.kt)("p",{parentName:"div"},"Synchronous Mode will depcrecated with Node.js v16. With an update to the\nunderlying Chromium version it became technically impossible to provide the\nsame synchronous behavior. We recommend to start transition to asynchronous\ncommand execution. For more information, see our ",(0,o.kt)("a",{href:"https://github.com/webdriverio/webdriverio/discussions/6702"},"RFC"),"."))))))}h.isMDXComponent=!0}}]); |
data_source_aws_elasticache_replication_group.go | package aws
import (
"fmt"
"log"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder"
)
func dataSourceAwsElasticacheReplicationGroup() *schema.Resource {
return &schema.Resource{
Read: dataSourceAwsElasticacheReplicationGroupRead,
Schema: map[string]*schema.Schema{
"replication_group_id": {
Type: schema.TypeString,
Required: true,
},
"replication_group_description": {
Type: schema.TypeString,
Computed: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"auth_token_enabled": {
Type: schema.TypeBool,
Computed: true,
},
"automatic_failover_enabled": {
Type: schema.TypeBool,
Computed: true,
},
"port": {
Type: schema.TypeInt,
Computed: true,
},
"configuration_endpoint_address": {
Type: schema.TypeString,
Computed: true,
},
"primary_endpoint_address": {
Type: schema.TypeString,
Computed: true,
},
"reader_endpoint_address": {
Type: schema.TypeString,
Computed: true,
},
"number_cache_clusters": {
Type: schema.TypeInt,
Computed: true,
},
"member_clusters": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"multi_az_enabled": {
Type: schema.TypeBool,
Computed: true,
},
"node_type": {
Type: schema.TypeString,
Computed: true,
},
"snapshot_window": {
Type: schema.TypeString,
Computed: true,
},
"snapshot_retention_limit": {
Type: schema.TypeInt,
Computed: true,
},
},
}
}
func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).elasticacheconn
groupID := d.Get("replication_group_id").(string)
rg, err := finder.ReplicationGroupByID(conn, groupID)
if err != nil {
return fmt.Errorf("error reading ElastiCache Replication Group (%s): %w", groupID, err)
}
d.SetId(aws.StringValue(rg.ReplicationGroupId))
d.Set("replication_group_description", rg.Description)
d.Set("arn", rg.ARN)
d.Set("auth_token_enabled", rg.AuthTokenEnabled)
if rg.AutomaticFailover != nil {
switch aws.StringValue(rg.AutomaticFailover) {
case elasticache.AutomaticFailoverStatusDisabled, elasticache.AutomaticFailoverStatusDisabling:
d.Set("automatic_failover_enabled", false)
case elasticache.AutomaticFailoverStatusEnabled, elasticache.AutomaticFailoverStatusEnabling:
d.Set("automatic_failover_enabled", true)
}
}
if rg.MultiAZ != nil {
switch strings.ToLower(aws.StringValue(rg.MultiAZ)) {
case elasticache.MultiAZStatusEnabled:
d.Set("multi_az_enabled", true)
case elasticache.MultiAZStatusDisabled:
d.Set("multi_az_enabled", false)
default:
log.Printf("Unknown MultiAZ state %q", aws.StringValue(rg.MultiAZ))
}
}
if rg.ConfigurationEndpoint != nil {
d.Set("port", rg.ConfigurationEndpoint.Port)
d.Set("configuration_endpoint_address", rg.ConfigurationEndpoint.Address)
} else {
if rg.NodeGroups == nil {
d.SetId("")
return fmt.Errorf("ElastiCache Replication Group (%s) doesn't have node groups", aws.StringValue(rg.ReplicationGroupId))
}
d.Set("port", rg.NodeGroups[0].PrimaryEndpoint.Port)
d.Set("primary_endpoint_address", rg.NodeGroups[0].PrimaryEndpoint.Address)
d.Set("reader_endpoint_address", rg.NodeGroups[0].ReaderEndpoint.Address)
}
d.Set("number_cache_clusters", len(rg.MemberClusters))
if err := d.Set("member_clusters", flattenStringList(rg.MemberClusters)); err != nil {
return fmt.Errorf("error setting member_clusters: %w", err)
}
d.Set("node_type", rg.CacheNodeType)
d.Set("snapshot_window", rg.SnapshotWindow)
d.Set("snapshot_retention_limit", rg.SnapshotRetentionLimit)
return nil
}
abstract_classifier.py | import abc
class AbstractClassifier:
""" Abstract class with specific methods for classifier models (training, validation and test) """
def __init__(self):
pass
@abc.abstractmethod
def train(self, config, train_data):
"""
Classifier training.
:param config: Model configuration.
:param train_data: Train dataset with the textual information of each item and its label.
:return: A model trained with train_data according to config.
"""
pass
@abc.abstractmethod
def validation(self, config, val_data):
"""
:param config: Model configuration.
:param val_data: Validation dataset with the textual information of each item and its label.
:return: Validation metrics
"""
pass
@abc.abstractmethod
def test(self, config, test_data):
"""
Classifier testing.
:param config: Model configuration.
:param test_data: Test dataset with the textual information of each item and its label.
:return: Predictions of the model in the test_data, according to config.
"""
pass
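# Illustrative sketch only (not part of the original interface): a minimal
# concrete subclass showing how the train/validation/test contract might be
# implemented. The majority-class strategy and the (text, label) dataset
# shape are assumptions made for this example.
class MajorityClassifier(AbstractClassifier):
    """Toy classifier that always predicts the most frequent training label."""

    def train(self, config, train_data):
        labels = [label for _, label in train_data]
        self.majority = max(set(labels), key=labels.count)
        return self

    def validation(self, config, val_data):
        correct = sum(1 for _, label in val_data if label == self.majority)
        return {"accuracy": correct / len(val_data)}

    def test(self, config, test_data):
        return [self.majority for _ in test_data]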
test_config_api_manager.py | import pytest
from app.api.v2 import errors
from app.api.v2.managers import config_api_manager
from app.api.v2.managers.config_api_manager import ConfigApiManager, ConfigNotFound, ConfigUpdateNotAllowed
from app.utility.base_world import BaseWorld
class StubDataService:
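    # Minimal stand-in for the real data service: locate() only answers for
    # the 'abilities' key, which is all these tests exercise.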
    def __init__(self):
self.abilities = []
async def locate(self, key):
assert key == 'abilities'
return self.abilities
@pytest.fixture
def base_world():
main_conf = {
'app.contact.dns.domain': 'mycaldera.caldera',
'app.contact.dns.socket': '0.0.0.0:8853',
'app.contact.html': '/weather',
'app.contact.http': 'http://0.0.0.0:8888',
'app.contact.tcp': '0.0.0.0:7010',
'app.contact.tunnel.ssh.socket': '0.0.0.0:8022',
'app.contact.udp': '0.0.0.0:7013',
'app.contact.websocket': '0.0.0.0:7012',
'exfil_dir': '/tmp/caldera',
'plugins': [
'stockpile',
'atomic'
],
'reports_dir': '/tmp',
'host': '0.0.0.0',
'auth.login.handler.module': 'default',
'users': {
'red': {
'red': 'password-foo'
},
'blue': {
'blue': 'password-bar'
}
}
}
agents_conf = {
'sleep_min': '30',
'sleep_max': '60',
'untrusted_timer': '90',
'watchdog': '0',
'implant_name': 'splunkd',
'deadman_abilities': [
'this-is-a-fake-ability'
],
'bootstrap_abilities': [
'this-is-another-fake-ability'
]
}
BaseWorld.clear_config()
BaseWorld.apply_config('main', main_conf)
BaseWorld.apply_config('agents', agents_conf)
yield BaseWorld
BaseWorld.clear_config()
def test_filter_keys():
mapping = {
'foo': 1,
'bar': 2,
'baz': {
'key3': 3,
'key4': 4
}
}
filtered = config_api_manager.filter_keys(mapping, keys_to_remove=['baz', 'bar'])
expected = {'foo': 1}
assert filtered == expected
def test_get_filtered_config_remove_sensitive_keys(base_world, data_svc):
test_conf = {
'users': 'this should be filtered',
'host': 'this should be filtered',
'foo': '1',
'bar': '2',
'baz': '3'
}
base_world.apply_config('test', test_conf)
manager = ConfigApiManager(data_svc, None)
filtered = manager.get_filtered_config('test')
expected = {
'foo': '1',
'bar': '2',
'baz': '3'
}
assert filtered == expected
def test_get_filtered_config_all_sensitive_keys_filtered(base_world, data_svc):
sensitive_conf = {key: 'foo' for key in config_api_manager.SENSITIVE_CONFIG_PROPS}
base_world.apply_config('test', sensitive_conf)
assert base_world.get_config(name='test') == sensitive_conf
manager = ConfigApiManager(data_svc, None)
filtered = manager.get_filtered_config('test')
assert filtered == {}
def test_get_filtered_config_throws_exception_on_not_found(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(ConfigNotFound):
manager.get_filtered_config('THIS DOES NOT EXIST')
def test_update_main_config(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
manager.update_main_config(prop='foo.bar', value=100)
assert manager.get_filtered_config('main')['foo.bar'] == 100
def test_update_main_config_throws_exception_on_sensitive_field(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(ConfigUpdateNotAllowed):
manager.update_main_config(prop='host', value='this is not allowed')
async def test_update_global_agent_config(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
await manager.update_global_agent_config(sleep_min=5, sleep_max=10)
agent_config = manager.get_filtered_config('agents')
assert agent_config['sleep_min'] == 5
assert agent_config['sleep_max'] == 10
async def test_update_global_agent_config_allows_partial_updates(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
agent_config = manager.get_filtered_config('agents')
await manager.update_global_agent_config() # no arguments passed in--should no-op
assert manager.get_filtered_config('agents') == agent_config
async def test_update_global_agent_config_updates_list_properties(base_world, ability):
stub_data_svc = StubDataService()
stub_data_svc.abilities = [
ability('ability-1'),
ability('ability-2'),
ability('ability-3')
]
manager = ConfigApiManager(data_svc=stub_data_svc, file_svc=None)
await manager.update_global_agent_config(
deadman_abilities=['ability-1', 'ability-2'],
bootstrap_abilities=['ability-3']
)
agent_config = manager.get_filtered_config('agents')
assert agent_config['deadman_abilities'] == ['ability-1', 'ability-2']
assert agent_config['bootstrap_abilities'] == ['ability-3']
async def test_update_global_agent_config_throws_validation_error_bad_sleep_min(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(sleep_min=-1)
async def test_update_global_agent_config_throws_validation_error_bad_sleep_max(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(sleep_max=-1)
async def test_update_global_agent_config_throws_validation_error_bad_watchdog(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(watchdog=-1)
async def test_update_global_agent_config_throws_validation_error_bad_untrusted_timer(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(untrusted_timer=-1)
async def test_update_global_agent_config_throws_validation_error_bad_implant_name(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_global_agent_config(implant_name='')
async def test_update_main_config_throws_validation_error_empty_prop(base_world, data_svc):
manager = ConfigApiManager(data_svc, None)
with pytest.raises(errors.DataValidationError):
await manager.update_main_config(prop='', value=1234)
pbmodel.go | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"github.com/pingcap/parser/ast"
parser_model "github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/types"
"github.com/pingcap/pd/v4/server/schedule/placement"
tspb "github.com/zhihu/zetta-proto/pkg/tablestore"
"github.com/zhihu/zetta/tablestore/rpc"
)
const (
AttrKeyMode = "mode"
AttrKeyLayout = "layout"
AttrFlexible = "flexible"
AttrFixed = "fixed"
AttrCompact = "compact"
AttrSparse = "sparse"
)
type TableMeta struct {
Id int64 `json:"id"`
tspb.TableMeta
parser_model.TableInfo
	MaxColumnFamilyID int64 `json:"max_cf_id"`
Columns []*ColumnMeta `json:"columns"`
ColumnFamilies []*ColumnFamilyMeta `json:"columnfamilies"`
Indices []*IndexMeta `json:"indices"`
Rules []*placement.Rule `json:"rules"`
}
func NewTableMetaFromPb(tm *tspb.TableMeta) *TableMeta {
ntm := &TableMeta{
TableMeta: *tm,
TableInfo: parser_model.TableInfo{
Name: parser_model.NewCIStr(tm.TableName),
},
}
ntm.transferColumns()
ntm.transferColumnFamilies()
return ntm
}
func NewTableMetaFromPbReq(tmr *tspb.CreateTableRequest) *TableMeta {
tm := NewTableMetaFromPb(tmr.GetTableMeta())
indices := tmr.GetIndexes()
tm.addIndices(indices)
return tm
}
func (tm *TableMeta) GetColumnIDMap() map[string]int {
res := make(map[string]int, len(tm.Columns))
for i, col := range tm.Columns {
res[col.Name] = i
}
return res
}
func (tm *TableMeta) ToTableInfo() *parser_model.TableInfo {
return &tm.TableInfo
}
func (tm *TableMeta) GetIndexes() []*IndexMeta {
return tm.Indices
}
func (tm *TableMeta) addIndices(indices []*tspb.IndexMeta) {
nindices := make([]*IndexMeta, 0)
for _, i := range indices {
nindices = append(nindices, NewIndexMetaFromPb(i))
}
tm.Indices = nindices
}
func (tm *TableMeta) GetPrimaryFieldTypes() []*types.FieldType {
res := make([]*types.FieldType, 0)
for _, p := range tm.TableMeta.GetPrimaryKey() {
for _, c := range tm.Columns {
			if p == c.ColumnMeta.Name {
				res = append(res, &c.FieldType)
			}
}
}
return res
}
func (tm *TableMeta) GetColumnMap() (map[string]*ColumnMeta, map[int64]*ColumnMeta) {
resName := make(map[string]*ColumnMeta)
resId := make(map[int64]*ColumnMeta)
for _, c := range tm.Columns {
resName[c.ColumnMeta.Name] = c
resId[c.Id] = c
}
return resName, resId
}
func (tm *TableMeta) transferColumns() {
clms := make([]*ColumnMeta, 0)
for _, c := range tm.TableMeta.Columns {
clms = append(clms, NewColumnMetaFromPb(c))
}
tm.Columns = clms
}
func (tm *TableMeta) transferColumnFamilies() {
cfs := make([]*ColumnFamilyMeta, 0)
for _, cf := range tm.TableMeta.ColumnFamilies {
ncf := &ColumnFamilyMeta{
ColumnFamilyMeta: *cf,
}
cfs = append(cfs, ncf)
}
tm.ColumnFamilies = cfs
}
func (tm *TableMeta) FindColumnByName(name string) *ColumnMeta {
for _, col := range tm.Columns {
if col.ColumnMeta.Name == name {
return col
}
}
return nil
}
func (tm *TableMeta) FindIndexByName(name string) *IndexMeta {
for _, idx := range tm.Indices {
if idx.Name == name {
return idx
}
}
return nil
}
// TODO: One column may be in multiple indexes. For now just return the first.
func (tm *TableMeta) GetIndexByColumnName(name string) (int, *IndexMeta) {
for _, idx := range tm.Indices {
for i, col := range idx.DefinedColumns {
if col == name {
return i, idx
}
}
}
for i, col := range tm.PrimaryKey {
if col == name {
return i, nil
}
}
return -1, nil
}
// TODO: Partition table.
type PartitionInfoZ struct{}
// GetPartitionInfo returns the partition information.
func (t *TableMeta) GetPartitionInfo() *PartitionInfo {
return nil
}
type DatabaseMeta struct {
tspb.DatabaseMeta
State SchemaState `json:"state"`
}
func NewDatabaseMetaFromPb(dbmeta *tspb.DatabaseMeta) *DatabaseMeta {
return &DatabaseMeta{
DatabaseMeta: *dbmeta,
}
}
func NewDatabaseMetaFromPbReq(dbreq *tspb.CreateDatabaseRequest) *DatabaseMeta {
return &DatabaseMeta{
DatabaseMeta: tspb.DatabaseMeta{
Database: dbreq.Database,
},
}
}
type ColumnFamilyMeta struct {
tspb.ColumnFamilyMeta
State SchemaState `json:"state"`
}
func DefaultColumnFamilyMeta() *ColumnFamilyMeta {
return &ColumnFamilyMeta{
ColumnFamilyMeta: tspb.ColumnFamilyMeta{
Id: 0,
Name: "default",
Attributes: map[string]string{
AttrKeyMode: AttrFixed,
AttrKeyLayout: AttrCompact,
},
},
}
}
type ColumnMeta struct {
Name string `json:"name"`
Id int64 `json:"id"`
tspb.ColumnMeta
parser_model.ColumnInfo
}
type ColumnMetaDummy struct {
Name string `json:"name"`
Id int64 `json:"id"`
tspb.ColumnMeta
parser_model.ColumnInfo
}
func (cm *ColumnMeta) UnmarshalJSON(b []byte) error {
cmd := ColumnMetaDummy{}
err := json.Unmarshal(b, &cmd)
if err != nil {
return err
}
cm.Name = cmd.Name
cm.ColumnMeta = cmd.ColumnMeta
cm.ColumnInfo = cmd.ColumnInfo
cm.ColumnInfo.Name = parser_model.NewCIStr(cm.Name)
cm.ColumnMeta.Name = cm.Name
cm.ColumnMeta.Id = cmd.Id
cm.ColumnInfo.ID = cmd.Id
cm.Id = cmd.Id
return nil
}
func NewColumnMetaFromColumnDef(columnDef *ast.ColumnDef) *ColumnMeta {
cm := &ColumnMeta{}
cm.ColumnMeta.Name = columnDef.Name.Name.L
cm.ColumnInfo.Name = columnDef.Name.Name
cm.Name = cm.ColumnMeta.Name
cm.FieldType = *columnDef.Tp
return cm
}
func removeOnUpdateNowFlag(c *ColumnMeta) {
// For timestamp Col, if it is set null or default value,
// OnUpdateNowFlag should be removed.
if mysql.HasTimestampFlag(c.Flag) {
c.Flag &= ^mysql.OnUpdateNowFlag
}
}
func (c *ColumnMeta) ToColumnInfo() *parser_model.ColumnInfo {
return &c.ColumnInfo
}
func GetPrimaryKeysFromConstraints(cons *ast.Constraint) []string {
pkeys := make([]string, 0)
for _, key := range cons.Keys {
pkeys = append(pkeys, key.Column.Name.L)
}
return pkeys
}
func FieldTypeToProtoType(ft *types.FieldType) *tspb.Type {
switch ft.Tp {
case mysql.TypeTiny:
return rpc.BoolType()
case mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeShort:
return rpc.IntType()
case mysql.TypeFloat, mysql.TypeDouble, mysql.TypeDecimal:
return rpc.FloatType()
case mysql.TypeTimestamp, mysql.TypeDuration:
return rpc.TimeType()
case mysql.TypeDate, mysql.TypeDatetime:
return rpc.DateType()
case mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeString:
return rpc.StringType()
case mysql.TypeBit, mysql.TypeBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeTinyBlob:
return rpc.BytesType()
case mysql.TypeSet:
return rpc.ListType(rpc.StringType())
case mysql.TypeJSON:
return &tspb.Type{Code: tspb.TypeCode_STRUCT}
default:
return &tspb.Type{Code: tspb.TypeCode_TYPE_CODE_UNSPECIFIED}
}
}
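// TypeTransfer performs the inverse mapping: it derives the MySQL field type
// from the column's protobuf type code so the column is usable by the parser layer.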
func (c *ColumnMeta) TypeTransfer() {
switch c.ColumnType.Code {
case tspb.TypeCode_BOOL:
c.FieldType = *types.NewFieldType(mysql.TypeTiny)
case tspb.TypeCode_INT64:
c.FieldType = *types.NewFieldType(mysql.TypeLonglong)
case tspb.TypeCode_FLOAT64:
c.FieldType = *types.NewFieldType(mysql.TypeDouble)
case tspb.TypeCode_TIMESTAMP:
c.FieldType = *types.NewFieldType(mysql.TypeTimestamp)
case tspb.TypeCode_DATE:
c.FieldType = *types.NewFieldType(mysql.TypeDate)
case tspb.TypeCode_STRING:
c.FieldType = *types.NewFieldType(mysql.TypeVarchar)
case tspb.TypeCode_BYTES:
c.FieldType = *types.NewFieldType(mysql.TypeBlob)
case tspb.TypeCode_ARRAY:
c.FieldType = *types.NewFieldType(mysql.TypeSet)
case tspb.TypeCode_STRUCT:
c.FieldType = *types.NewFieldType(mysql.TypeJSON)
}
}
func NewColumnMetaFromPb(cm *tspb.ColumnMeta) *ColumnMeta {
ncm := &ColumnMeta{
ColumnMeta: *cm,
}
ncm.TypeTransfer()
return ncm
}
type IndexMeta struct {
tspb.IndexMeta
IsPrimary bool
State SchemaState `json:"state"`
}
func NewIndexMetaFromPb(idxm *tspb.IndexMeta) *IndexMeta {
nidxm := &IndexMeta{
IndexMeta: *idxm,
}
return nidxm
}
func NewIndexMetaFromPbReq(idxreq *tspb.CreateIndexRequest) *IndexMeta {
idx := &IndexMeta{
IndexMeta: *idxreq.Indexes,
}
return idx
}
func NewIndexMetaFromConstraits(cons *ast.Constraint) *IndexMeta {
idx := &IndexMeta{}
idx.DefinedColumns = make([]string, len(cons.Keys))
idx.Name = cons.Name
switch cons.Tp {
case ast.ConstraintIndex:
idx.Unique = false
case ast.ConstraintUniq:
idx.Unique = true
}
for i, key := range cons.Keys {
idx.DefinedColumns[i] = key.Column.Name.L
}
return idx
}
groupDRO.py | import torch
from algorithms.single_model_algorithm import SingleModelAlgorithm
from models.initializer import initialize_model
class GroupDRO(SingleModelAlgorithm):
"""
Group distributionally robust optimization.
Original paper:
@inproceedings{sagawa2019distributionally,
title={Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},
author={Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},
booktitle={International Conference on Learning Representations},
year={2019}
}
"""
def __init__(self, config, d_out, grouper, loss, metric, n_train_steps, is_group_in_train):
# check config
assert config.uniform_over_groups
# initialize model
model = initialize_model(config, d_out).to(config.device)
# initialize module
super().__init__(
config=config,
model=model,
grouper=grouper,
loss=loss,
metric=metric,
n_train_steps=n_train_steps,
)
# additional logging
self.logged_fields.append('group_weight')
# step size
self.group_weights_step_size = config.group_dro_step_size
# initialize adversarial weights
self.group_weights = torch.zeros(grouper.n_groups)
self.group_weights[is_group_in_train] = 1
self.group_weights = self.group_weights/self.group_weights.sum()
self.group_weights = self.group_weights.to(self.device)
def process_batch(self, batch):
"""
A helper function for update() and evaluate() that processes the batch
Args:
- batch (tuple of Tensors): a batch of data yielded by data loaders
Output:
- results (dictionary): information about the batch
- g (Tensor)
- y_true (Tensor)
- metadata (Tensor)
- loss (Tensor)
- metrics (Tensor)
all Tensors are of size (batch_size,)
"""
results = super().process_batch(batch)
results['group_weight'] = self.group_weights
return results
def | (self, results):
"""
Takes an output of SingleModelAlgorithm.process_batch() and computes the
optimized objective. For group DRO, the objective is the weighted average
of losses, where groups have weights groupDRO.group_weights.
Args:
- results (dictionary): output of SingleModelAlgorithm.process_batch()
Output:
- objective (Tensor): optimized objective; size (1,).
"""
group_losses, _, _ = self.loss.compute_group_wise(
results['y_pred'],
results['y_true'],
results['g'],
self.grouper.n_groups,
return_dict=False)
return group_losses @ self.group_weights
def _update(self, results):
"""
Process the batch, update the log, and update the model, group weights, and scheduler.
Args:
- batch (tuple of Tensors): a batch of data yielded by data loaders
Output:
- results (dictionary): information about the batch, such as:
- g (Tensor)
- y_true (Tensor)
- metadata (Tensor)
- loss (Tensor)
- metrics (Tensor)
- objective (float)
"""
# compute group losses
group_losses, _, _ = self.loss.compute_group_wise(
results['y_pred'],
results['y_true'],
results['g'],
self.grouper.n_groups,
return_dict=False)
# update group weights
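        # Exponentiated-gradient ascent on the probability simplex: each
        # group's weight is scaled by exp(step_size * current group loss) and
        # then renormalized, so persistently high-loss groups come to dominate
        # the objective.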
self.group_weights = self.group_weights * torch.exp(self.group_weights_step_size*group_losses.data)
self.group_weights = (self.group_weights/(self.group_weights.sum()))
# save updated group weights
results['group_weight'] = self.group_weights
# update model
super()._update(results)
| objective |
customer_signups.py | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
from mcfw.rpc import arguments, returns
from rogerthat.models import Message
from rogerthat.models.properties.forms import FormResult
from rogerthat.rpc import users
from rogerthat.service.api import messaging
from rogerthat.to.messaging.forms import TextBlockFormTO, TextBlockTO, FormTO
from rogerthat.to.messaging.service_callback_results import FormAcknowledgedCallbackResultTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.utils.app import get_app_user_tuple
from solutions import translate
from solutions.common.dal import get_solution_main_branding, get_solution_settings
from solutions.common.models import SolutionInboxMessage
@arguments(service_user=users.User, service_identity=unicode, message_key=unicode, app_user=users.User, name=unicode,
answer_id=unicode, parent_inbox_message=SolutionInboxMessage)
def process_updated_customer_signup_message(service_user, service_identity, message_key, app_user, name, answer_id,
parent_inbox_message):
# type: (users.User, unicode, unicode, users.User, unicode, unicode, SolutionInboxMessage) -> None
from solutions.common.bizz.messaging import MESSAGE_TAG_DENY_SIGNUP
from solutions.common.restapi.services import rest_create_service_from_signup
with users.set_user(service_user):
sln_settings = get_solution_settings(service_user)
if answer_id == 'decline':
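            # Declining a signup: prompt the operator for a mandatory reason
            # via a text block form before the denial is processed.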
widget = TextBlockTO()
widget.max_chars = 1024
form = TextBlockFormTO()
form.type = TextBlockTO.TYPE
form.widget = widget
form.positive_button = translate(sln_settings.main_language, 'Confirm')
form.negative_button = translate(sln_settings.main_language, 'Cancel')
form.javascript_validation = """function run(result) {
return result.value ? true : '%s';
}""" % translate(sln_settings.main_language, 'this_field_is_required', _duplicate_backslashes=True)
human_user, app_id = get_app_user_tuple(app_user)
messaging.send_form(parent_key=parent_inbox_message.message_key,
parent_message_key=parent_inbox_message.message_key,
message=translate(sln_settings.main_language, 'signup_not_ok'),
member=human_user.email(),
app_id=app_id,
flags=Message.FLAG_AUTO_LOCK,
branding=get_solution_main_branding(service_user).branding_key,
tag=json.dumps({'__rt__.tag': MESSAGE_TAG_DENY_SIGNUP,
'signup_key': parent_inbox_message.category_key}),
form=form,
service_identity=service_identity,
alert_flags=Message.ALERT_FLAG_VIBRATE)
elif answer_id == 'approve':
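            # Approving a signup: create the service right away and report any
            # failure back into the message thread.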
result = rest_create_service_from_signup(parent_inbox_message.category_key,
force=True) # type: CreateServiceStatusTO
if not result.success:
messaging.send(parent_message_key=message_key,
message=result.errormsg,
answers=[],
flags=Message.FLAG_ALLOW_DISMISS,
branding=get_solution_main_branding(service_user).branding_key,
tag=None,
service_identity=service_identity)
@returns(FormAcknowledgedCallbackResultTO)
@arguments(service_user=users.User, status=int, form_result=FormResult, answer_id=unicode, member=unicode,
message_key=unicode, tag=unicode, received_timestamp=int, acked_timestamp=int, parent_message_key=unicode,
result_key=unicode, service_identity=unicode, user_details=[UserDetailsTO])
def | (service_user, status, form_result, answer_id, member, message_key, tag,
received_timestamp, acked_timestamp, parent_message_key, result_key,
service_identity, user_details):
from solutions.common.restapi import rest_customer_signup_reply
with users.set_user(service_user):
if answer_id == FormTO.POSITIVE:
tag_dict = json.loads(tag)
rest_customer_signup_reply(tag_dict['signup_key'], form_result.result.value)
| deny_signup |
new_map_gen.go | // Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// This file was automatically generated by genny.
// Any changes will be lost if this file is regenerated.
// see https://github.com/mauricelam/genny
package repair
import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
"github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// MapOptions provides options used when creating the map.
type MapOptions struct {
InitialSize int
KeyCopyPool pool.BytesPool
}
// NewMap returns a new byte keyed map.
func NewMap(opts MapOptions) *Map | {
var (
copyFn CopyFn
finalizeFn FinalizeFn
)
if pool := opts.KeyCopyPool; pool == nil {
copyFn = func(k ident.ID) ident.ID {
return ident.BytesID(append([]byte(nil), k.Bytes()...))
}
} else {
copyFn = func(k ident.ID) ident.ID {
bytes := k.Bytes()
keyLen := len(bytes)
pooled := pool.Get(keyLen)[:keyLen]
copy(pooled, bytes)
return ident.BytesID(pooled)
}
finalizeFn = func(k ident.ID) {
if slice, ok := k.(ident.BytesID); ok {
pool.Put(slice)
}
}
}
return mapAlloc(mapOptions{
hash: func(id ident.ID) MapHash {
return MapHash(xxhash.Sum64(id.Bytes()))
},
equals: func(x, y ident.ID) bool {
return x.Equal(y)
},
copy: copyFn,
finalize: finalizeFn,
initialSize: opts.InitialSize,
})
} |
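
// Example usage (illustrative sketch, not part of the generated file). The
// concrete accessors (Set/Get/Len) come from the genny map template, and the
// stored value type is whatever this package instantiated the template with:
//
//	m := NewMap(MapOptions{InitialSize: 64})
//	id := ident.StringID("series-a")
//	m.Set(id, value) // value's type is package-specific
//	if v, ok := m.Get(id); ok {
//		_ = v
//	}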
|
delete_test.py | from requests import delete
delete('http://localhost:5000/delete_pokemon/0010').json() |
||
vmware_amd64.go | // Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The vmware provider fetches a configuration from the VMware Guest Info
// interface.
package vmware
import (
"net/url"
"github.com/flatcar-linux/ignition/config/validate/report"
"github.com/flatcar-linux/ignition/internal/config/types"
"github.com/flatcar-linux/ignition/internal/providers"
"github.com/flatcar-linux/ignition/internal/providers/util"
"github.com/flatcar-linux/ignition/internal/resource"
"github.com/sigma/vmw-guestinfo/rpcvmx"
"github.com/sigma/vmw-guestinfo/vmcheck"
"github.com/vmware/vmw-ovflib"
)
func | (f *resource.Fetcher) (types.Config, report.Report, error) {
if !vmcheck.IsVirtualWorld() {
return types.Config{}, report.Report{}, providers.ErrNoProvider
}
config, err := fetchDataConfig(f)
if err == nil && len(config) == 0 {
config, err = fetchUrlConfig(f)
}
if err != nil {
return types.Config{}, report.Report{}, err
}
f.Logger.Debug("config successfully fetched")
return util.ParseConfig(f.Logger, config)
}
func fetchDataConfig(f *resource.Fetcher) ([]byte, error) {
var data string
var encoding string
var err error
data, err = getVariable(f, "ignition.config.data")
if err == nil && data != "" {
encoding, err = getVariable(f, "ignition.config.data.encoding")
} else {
data, err = getVariable(f, "coreos.config.data")
if err == nil && data != "" {
encoding, err = getVariable(f, "coreos.config.data.encoding")
}
}
// Do not check against err from "encoding" because leaving it empty is ok
if data == "" {
f.Logger.Debug("failed to fetch config")
return []byte{}, nil
}
decodedData, err := decodeConfig(config{
data: data,
encoding: encoding,
})
if err != nil {
f.Logger.Debug("failed to decode config: %v", err)
return nil, err
}
return decodedData, nil
}
func fetchUrlConfig(f *resource.Fetcher) ([]byte, error) {
rawUrl, err := getVariable(f, "ignition.config.url")
if err != nil || rawUrl == "" {
rawUrl, err = getVariable(f, "coreos.config.url")
}
if err != nil || rawUrl == "" {
f.Logger.Info("no config URL provided")
return []byte{}, nil
}
f.Logger.Debug("found url: %q", rawUrl)
url, err := url.Parse(rawUrl)
if err != nil {
f.Logger.Err("failed to parse url: %v", err)
return nil, err
}
if url == nil {
return []byte{}, nil
}
data, err := f.FetchToBuffer(*url, resource.FetchOptions{
Headers: resource.ConfigHeaders,
})
if err != nil {
return nil, err
}
return data, nil
}
func getVariable(f *resource.Fetcher, key string) (string, error) {
info := rpcvmx.NewConfig()
var ovfData string
ovfEnv, err := info.String("ovfenv", "")
if err != nil {
f.Logger.Warning("failed to fetch ovfenv: %v. Continuing...", err)
} else if ovfEnv != "" {
f.Logger.Debug("using OVF environment from guestinfo")
env, err := ovf.ReadEnvironment([]byte(ovfEnv))
if err != nil {
f.Logger.Warning("failed to parse OVF environment: %v. Continuing...", err)
} else {
ovfData = env.Properties["guestinfo."+key]
}
}
	// The guest variables take precedence over the ovfenv variables, which are used here as a fallback
data, err := info.String(key, ovfData)
if err != nil {
f.Logger.Debug("failed to fetch variable, falling back to ovfenv value: %v", err)
return ovfData, nil
}
// An empty string will be returned if nothing was found
return data, nil
}
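
// Example (sketch): supplying a config through guestinfo variables in the
// VM's .vmx file. The key names match the lookups above; "base64" as the
// encoding value is an assumption based on the field names, not confirmed
// by this file:
//
//	guestinfo.ignition.config.data = "eyJpZ25pdGlvbiI6eyJ2ZXJzaW9uIjoiMi4yLjAifX0="
//	guestinfo.ignition.config.data.encoding = "base64"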
| FetchConfig |
options.go | // Copyright 2022 Mailchain Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envelope
import (
"github.com/mailchain/mailchain/internal/mli"
"github.com/pkg/errors"
)
// CreateOptionsBuilder applies a single option to CreateOpts when building an envelope.
type CreateOptionsBuilder func(*CreateOpts)
// CreateOpts for building an envelope.
type CreateOpts struct {
// URL of message.
URL string
// DecryptedHash use to verify the decrypted contents have not been tampered with.
DecryptedHash []byte
// EncryptedHash use to verify the encrypted contents have not been tampered with.
EncryptedHash []byte
// Resource id of the message.
Resource string
// Kind type of envelope used
Kind byte
// Location maps to an addressable location.
Location uint64
// EncryptedContents message after its been encrypted.
EncryptedContents []byte
}
// WithKind creates options builder with envelope type identifier.
func WithKind(kind byte) CreateOptionsBuilder {
return func(o *CreateOpts) { o.Kind = kind }
}
// WithURL creates options builder with an encrypted URL.
func WithURL(address string) CreateOptionsBuilder {
return func(o *CreateOpts) { o.URL = address }
}
// WithResource creates options builder with a resource location.
func WithResource(resource string) CreateOptionsBuilder {
return func(o *CreateOpts) { o.Resource = resource }
}
// WithEncryptedContents creates options builder with the encrypted content of the message.
func WithEncryptedContents(encryptedContents []byte) CreateOptionsBuilder |
// WithMessageLocationIdentifier creates options builder with a message location identifier.
func WithMessageLocationIdentifier(msgLocInd uint64) (CreateOptionsBuilder, error) {
_, ok := mli.ToAddress()[msgLocInd]
if !ok && msgLocInd != 0 {
		return func(o *CreateOpts) {}, errors.Errorf("unknown mli %d", msgLocInd)
}
return func(o *CreateOpts) { o.Location = msgLocInd }, nil
}
// WithDecryptedHash creates options builder with the decrypted hash.
func WithDecryptedHash(decryptedHash []byte) CreateOptionsBuilder {
return func(o *CreateOpts) { o.DecryptedHash = decryptedHash }
}
// WithEncryptedHash creates options builder with the encrypted hash.
func WithEncryptedHash(encryptedHash []byte) CreateOptionsBuilder {
return func(o *CreateOpts) { o.EncryptedHash = encryptedHash }
}
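
// Example usage (illustrative sketch): builders are applied in order to a
// CreateOpts value. The applying loop below is only for illustration; the
// envelope constructor that actually consumes CreateOptionsBuilder lives
// elsewhere in this package:
//
//	opts := &CreateOpts{}
//	for _, builder := range []CreateOptionsBuilder{
//		WithKind(0x01),
//		WithURL("https://example.com/message"),
//	} {
//		builder(opts)
//	}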
| {
return func(o *CreateOpts) { o.EncryptedContents = encryptedContents }
} |
Memory_RNN.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 11:28:50 2017
@author: dhingratul
"""
from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import helpers
# hyperparams
num_epochs = 10000
total_series_length = 100
truncated_backprop_length = 5
state_size = 4 # Number of neurons in the hidden layer
num_classes = 2 # Data is binary, 0 / 1 = Two Classes
batch_size = 8
num_batches = total_series_length//batch_size//truncated_backprop_length
# Step 1 - Data Generation
# Generate integers and corresponding binary numbers randomly selected in a
# range of 10,000. The data points are zero-padded so as to make a constant
# length of 100
shift_batch = 0
def generateData(shift_batch):
|
# Step 2 - Build the Model
batchX_placeholder = tf.placeholder(
tf.float32, [batch_size, truncated_backprop_length])
batchY_placeholder = tf.placeholder(
tf.int32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [batch_size, state_size])
# Randomly initialize weights
W = tf.Variable(np.random.rand(state_size+1, state_size), dtype=tf.float32)
b = tf.Variable(np.zeros((1, state_size)), dtype=tf.float32)
W2 = tf.Variable(np.random.rand(state_size, num_classes), dtype=tf.float32)
b2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)
# Unpack columns
inputs_series = tf.unstack(batchX_placeholder, axis=1)
labels_series = tf.unstack(batchY_placeholder, axis=1)
# Forward pass
# State placeholder
current_state = init_state
# series of states through time
states_series = []
# For each set of inputs, forward pass through the network to get new state
# values and store all states in memory
for current_input in inputs_series:
current_input = tf.reshape(current_input, [batch_size, 1])
# Concatenate state and input data
input_and_state_concatenated = tf.concat(
axis=1, values=[current_input, current_state])
next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b)
# Store the state in memory
states_series.append(next_state)
# Set current state to next one
current_state = next_state
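# Illustrative note (not part of the graph): each loop iteration above
# implements the vanilla-RNN recurrence h_t = tanh([x_t, h_{t-1}] . W + b).
# A NumPy equivalent for one step, assuming x_t has shape (batch_size, 1)
# and h has shape (batch_size, state_size):
#
#   concat = np.concatenate([x_t, h], axis=1)  # (batch_size, 1 + state_size)
#   h = np.tanh(concat.dot(W_np) + b_np)       # (batch_size, state_size)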
# Calculate loss
logits_series = [tf.matmul(state, W2) + b2 for state in states_series]
# Softmax Non-linearity
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
# Measure loss, calculate softmax again on logits, then compute cross entropy
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels) for logits,
labels in zip(logits_series, labels_series)]
# Average Loss
total_loss = tf.reduce_mean(losses)
# Use adagrad for minimization
train_step = tf.train.AdagradOptimizer(0.2).minimize(total_loss)
# Step 3 - Training the network
with tf.Session() as sess:
y = np.zeros([batch_size])
sess.run(tf.global_variables_initializer())
loss_list = []
for epoch_idx in range(num_epochs):
# Generate new data at every epoch
x, y = generateData(shift_batch)
        # Regenerate until the batch dimension matches batch_size (8)
        while len(y) != batch_size:
            x, y = generateData(shift_batch)
# Empty hidden state
_current_state = np.zeros((batch_size, state_size))
print("epoch", epoch_idx)
for batch_idx in range(num_batches):
# layers unrolled to a limited number of time-steps:
# truncated length
start_idx = batch_idx * truncated_backprop_length
end_idx = start_idx + truncated_backprop_length
batchX = x[:, start_idx:end_idx]
batchY = y[:, start_idx:end_idx]
# Run the computation graph, give it the values
_total_loss, _train_step, _current_state, _predictions_series = \
sess.run(
[total_loss, train_step, current_state,
predictions_series],
feed_dict={
batchX_placeholder: batchX,
batchY_placeholder: batchY,
init_state: _current_state
})
# print(batchX, batchY)
loss_list.append(_total_loss)
if batch_idx % 100 == 0:
print("Loss", _total_loss)
| vector_size = 100
batches = helpers.random_sequences(length_from=3, length_to=8,
vocab_lower=0, vocab_upper=2,
batch_size=vector_size)
batch = next(batches)
x, _ = helpers.batch(batch)
if shift_batch == 0: # Learning the same sequence
y = x
else:
y_inter2 = helpers.shifter(batch, shift_batch)
y, _ = helpers.batch(y_inter2)
return x, y |
epoll_entry.rs | // Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::qlib::mutex::*;
use alloc::sync::Arc;
use core::ops::Deref;
use super::super::super::super::linux_def::*;
use super::super::super::fs::file::*;
use super::super::waiter::*;
use super::epoll::*;
pub type EntryFlags = i32;
pub const ONE_SHOT: EntryFlags = 1 << 0;
pub const EDGE_TRIGGERED: EntryFlags = 1 << 1;
#[derive(Clone)]
pub struct FileIdentifier {
pub File: FileWeak,
pub Fd: i32,
}
impl Ord for FileIdentifier {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.File.Upgrade().cmp(&other.File.Upgrade())
}
}
impl PartialOrd for FileIdentifier {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for FileIdentifier {
fn eq(&self, other: &Self) -> bool {
return self.File.Upgrade() == other.File.Upgrade();
}
}
impl Eq for FileIdentifier {}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PollEntryState {
Ready,
Waiting,
Disabled,
}
pub struct PollEntryInternal {
pub next: Option<PollEntry>,
pub prev: Option<PollEntry>,
pub id: FileIdentifier,
pub userData: [i32; 2],
pub waiter: WaitEntry,
pub mask: EventMask,
pub flags: EntryFlags,
pub epoll: EventPoll,
pub state: PollEntryState,
}
#[derive(Debug, Clone, Copy)]
pub struct ReadyState {
pub mask: u32,
}
#[derive(Clone)]
pub struct PollEntry(pub Arc<QMutex<PollEntryInternal>>);
impl PollEntry {
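    /// Called by the wait queue when the watched file becomes ready: a
    /// waiting entry is moved from the epoll's waiting list to its ready
    /// list, and the epoll's queue is notified as readable.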
pub fn CallBack(&self) {
let epoll = self.lock().epoll.clone();
let mut lists = epoll.lists.lock();
let state = self.lock().state;
if state == PollEntryState::Waiting {
self.SetReady();
lists.waitingList.Remove(self);
lists.readyList.PushBack(self);
epoll.queue.Notify(READABLE_EVENT);
}
}
pub fn SetReady(&self) -> PollEntryState {
let mut e = self.lock();
let oldstate = e.state;
e.state = PollEntryState::Ready;
return oldstate;
}
pub fn | (&self) -> u32 {
return self.lock().mask as u32;
}
pub fn Id(&self) -> i32 {
return self.lock().id.Fd;
}
pub fn Reset(&self) {
self.lock().prev = None;
self.lock().next = None;
}
}
impl Deref for PollEntry {
type Target = Arc<QMutex<PollEntryInternal>>;
fn deref(&self) -> &Arc<QMutex<PollEntryInternal>> {
&self.0
}
}
impl PollEntry {
pub fn Next(&self) -> Option<PollEntry> {
return self.lock().next.clone();
}
pub fn Prev(&self) -> Option<PollEntry> {
return self.lock().prev.clone();
}
pub fn SetNext(&self, elem: Option<PollEntry>) {
self.lock().next = elem
}
pub fn SetPrev(&self, elem: Option<PollEntry>) {
self.lock().prev = elem
}
}
| ReadyState |
operations.rs | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
pub mod private_endpoint_connection {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
private_endpoint_connection_name: &str,
) -> std::result::Result<PrivateEndpointConnectionResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateEndpointConnections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
private_endpoint_connection_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnectionResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn put(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
private_endpoint_connection_name: &str,
parameters: &PrivateEndpointConnectionResource,
) -> std::result::Result<put::Response, put::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateEndpointConnections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
private_endpoint_connection_name
);
let mut url = url::Url::parse(url_str).map_err(put::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(put::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(put::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(put::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(put::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnectionResource =
serde_json::from_slice(rsp_body).map_err(|source| put::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(put::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: PrivateEndpointConnectionResource =
serde_json::from_slice(rsp_body).map_err(|source| put::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(put::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(put::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod put {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(PrivateEndpointConnectionResource),
Created201(PrivateEndpointConnectionResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
private_endpoint_connection_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateEndpointConnections/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
private_endpoint_connection_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub async fn get_operation_status(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
private_endpoint_connection_name: &str,
operation_id: &str,
) -> std::result::Result<OperationStatus, get_operation_status::Error> {
let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/privateEndpointConnections/{}/operationsStatus/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        vault_name,
        private_endpoint_connection_name,
        operation_id
    );
let mut url = url::Url::parse(url_str).map_err(get_operation_status::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get_operation_status::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get_operation_status::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(get_operation_status::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationStatus = serde_json::from_slice(rsp_body)
.map_err(|source| get_operation_status::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| get_operation_status::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get_operation_status::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get_operation_status {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
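// Example usage (illustrative sketch): calling one of the generated
// operations. How `crate::OperationConfig` is constructed is not shown in
// this file, and the resource names below are placeholders:
//
//     let connection = private_endpoint_connection::get(
//         &operation_config,
//         "example-vault",
//         "example-rg",
//         "00000000-0000-0000-0000-000000000000",
//         "example-connection",
//     )
//     .await?;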
pub mod backup_resource_vault_configs {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<BackupResourceVaultConfigResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupconfig/vaultconfig",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupResourceVaultConfigResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn put(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
parameters: &BackupResourceVaultConfigResource,
) -> std::result::Result<BackupResourceVaultConfigResource, put::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupconfig/vaultconfig",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(put::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(put::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(put::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(put::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(put::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupResourceVaultConfigResource =
serde_json::from_slice(rsp_body).map_err(|source| put::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| put::Error::DeserializeError(source, rsp_body.clone()))?;
Err(put::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod put {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
parameters: &BackupResourceVaultConfigResource,
) -> std::result::Result<BackupResourceVaultConfigResource, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupconfig/vaultconfig",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupResourceVaultConfigResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protected_items {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
filter: Option<&str>,
) -> std::result::Result<ProtectedItemResource, get::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name
        );
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectedItemResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
parameters: &ProtectedItemResource,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name
        );
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectedItemResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(ProtectedItemResource),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name
        );
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protected_item_operation_results {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
operation_id: &str,
) -> std::result::Result<get::Response, get::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/operationResults/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name,
            operation_id
        );
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectedItemResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(get::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(get::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(get::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(ProtectedItemResource),
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod recovery_points {
use crate::models::*;
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
filter: Option<&str>,
) -> std::result::Result<RecoveryPointResourceList, list::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name
        );
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RecoveryPointResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
recovery_point_id: &str,
) -> std::result::Result<RecoveryPointResource, get::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name,
            recovery_point_id
        );
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RecoveryPointResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod restores {
use crate::models::*;
pub async fn trigger(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
recovery_point_id: &str,
parameters: &RestoreRequestResource,
) -> std::result::Result<(), trigger::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}/restore",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name,
            recovery_point_id
        );
let mut url = url::Url::parse(url_str).map_err(trigger::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(trigger::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(trigger::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(trigger::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(trigger::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(trigger::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod trigger {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
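// Example usage (illustrative sketch): triggering a restore from a chosen
// recovery point. The shape of RestoreRequestResource is defined in
// crate::models and is service-specific; all values below are placeholders:
//
//     restores::trigger(
//         &operation_config,
//         "example-vault",
//         "example-rg",
//         "00000000-0000-0000-0000-000000000000",
//         "Azure",
//         "example-container",
//         "example-protected-item",
//         "example-recovery-point",
//         &restore_request,
//     )
//     .await?;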
pub mod backup_policies {
use crate::models::*;
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
) -> std::result::Result<ProtectionPolicyResourceList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionPolicyResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protection_policies {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
policy_name: &str,
) -> std::result::Result<ProtectionPolicyResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
policy_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionPolicyResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
    }
    pub mod get {
        use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
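    /// Creates a new backup policy or updates an existing one with a PUT. The
    /// service may answer 200 with the stored policy, or 202 when it finishes
    /// the operation asynchronously; the two outcomes are modelled by
    /// [`create_or_update::Response`].
    ///
    /// A minimal handling sketch (names are placeholders, error handling
    /// elided):
    ///
    /// ```ignore
    /// match protection_policies::create_or_update(&config, vault, rg, sub, "policy-1", &policy).await? {
    ///     create_or_update::Response::Ok200(stored) => println!("stored: {:?}", stored),
    ///     create_or_update::Response::Accepted202 => println!("accepted; poll the operation result"),
    /// }
    /// ```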
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
policy_name: &str,
parameters: &ProtectionPolicyResource,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
policy_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionPolicyResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(ProtectionPolicyResource),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
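    /// Deletes a backup policy. Both 200 and 204 count as success and are
    /// mapped to the variants of [`delete::Response`].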
pub async fn delete(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
policy_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
policy_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protection_policy_operation_results {
use crate::models::*;
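    /// Fetches the result of a long-running policy operation identified by
    /// `operation_id`; a 200 response carries the resulting
    /// [`ProtectionPolicyResource`].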
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
policy_name: &str,
operation_id: &str,
) -> std::result::Result<ProtectionPolicyResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}/operationResults/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
policy_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionPolicyResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_jobs {
use crate::models::*;
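    /// Lists the backup jobs of the vault. `filter` and `skip_token` are
    /// forwarded as the OData `$filter` and `$skipToken` query parameters when
    /// provided.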
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
skip_token: Option<&str>,
) -> std::result::Result<JobResourceList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: JobResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod job_details {
use crate::models::*;
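    /// Fetches extended information for a single backup job by `job_name`.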
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
job_name: &str,
) -> std::result::Result<JobResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
job_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: JobResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod job_cancellations {
use crate::models::*;
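    /// Requests cancellation of a running backup job with an empty POST; the
    /// only success status is 202 Accepted.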
pub async fn trigger(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
job_name: &str,
) -> std::result::Result<(), trigger::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs/{}/cancel",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
job_name
);
let mut url = url::Url::parse(url_str).map_err(trigger::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(trigger::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(trigger::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(trigger::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(trigger::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod trigger {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod job_operation_results {
use crate::models::*;
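    /// Polls the result of a job operation. The 200/202/204 statuses map
    /// directly onto the variants of [`get::Response`]; no body is
    /// deserialized.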
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
job_name: &str,
operation_id: &str,
) -> std::result::Result<get::Response, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs/{}/operationResults/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
job_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(get::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(get::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(get::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod export_jobs_operation_results {
use crate::models::*;
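    /// Fetches the result of an export-jobs operation. Both 200 and 202
    /// responses carry an [`OperationResultInfoBaseResource`] payload,
    /// distinguished by the variants of [`get::Response`].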
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
operation_id: &str,
) -> std::result::Result<get::Response, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobs/operationResults/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationResultInfoBaseResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(get::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => {
let rsp_body = rsp.body();
let rsp_value: OperationResultInfoBaseResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(get::Response::Accepted202(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(OperationResultInfoBaseResource),
Accepted202(OperationResultInfoBaseResource),
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod jobs {
use crate::models::*;
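    /// Triggers an export of the vault's jobs, optionally restricted by
    /// `$filter`. The export runs asynchronously, so the only success status
    /// is 202 Accepted; the outcome is retrieved separately via
    /// `export_jobs_operation_results::get`.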
pub async fn export(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
) -> std::result::Result<(), export::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupJobsExport",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(export::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(export::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(export::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(export::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(export::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod export {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_protected_items {
use crate::models::*;
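    /// Lists the items protected by the vault, with optional `$filter` and
    /// `$skipToken` query parameters.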
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
skip_token: Option<&str>,
) -> std::result::Result<ProtectedItemResourceList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupProtectedItems",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectedItemResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod operation {
use crate::models::*;
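    /// Validates a proposed operation against the vault by POSTing a
    /// [`ValidateOperationRequest`], without performing the operation itself.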
pub async fn validate(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
parameters: &ValidateOperationRequest,
) -> std::result::Result<ValidateOperationsResponse, validate::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupValidateOperation",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(validate::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(validate::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(validate::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(validate::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(validate::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ValidateOperationsResponse =
serde_json::from_slice(rsp_body).map_err(|source| validate::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(validate::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod validate {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protection_intent {
use crate::models::*;
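    /// Pre-validates whether backup can be enabled for a resource. Note that
    /// this call is scoped to a subscription and Azure region rather than to a
    /// vault.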
pub async fn validate(
operation_config: &crate::OperationConfig,
azure_region: &str,
subscription_id: &str,
parameters: &PreValidateEnableBackupRequest,
) -> std::result::Result<PreValidateEnableBackupResponse, validate::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/providers/Microsoft.RecoveryServices/locations/{}/backupPreValidateProtection",
operation_config.base_path(),
subscription_id,
azure_region
);
let mut url = url::Url::parse(url_str).map_err(validate::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(validate::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(validate::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(validate::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(validate::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: PreValidateEnableBackupResponse =
serde_json::from_slice(rsp_body).map_err(|source| validate::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(validate::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod validate {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
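    /// Fetches a protection intent by fabric name and intent object name.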
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
intent_object_name: &str,
) -> std::result::Result<ProtectionIntentResource, get::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/backupProtectionIntent/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            intent_object_name
        );
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionIntentResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
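    /// Creates or updates a protection intent with a PUT; a 200 response
    /// returns the stored [`ProtectionIntentResource`].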
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
intent_object_name: &str,
parameters: &ProtectionIntentResource,
) -> std::result::Result<ProtectionIntentResource, create_or_update::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/backupProtectionIntent/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            intent_object_name
        );
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionIntentResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod create_or_update {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
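    /// Deletes a protection intent; the only success status is 204 No Content.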
pub async fn delete(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
intent_object_name: &str,
) -> std::result::Result<(), delete::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/backupProtectionIntent/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            intent_object_name
        );
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod delete {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_status {
use crate::models::*;
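    /// Queries the backup status of a resource by POSTing a
    /// [`BackupStatusRequest`] to the region-scoped `backupStatus` endpoint.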
pub async fn get(
operation_config: &crate::OperationConfig,
azure_region: &str,
subscription_id: &str,
parameters: &BackupStatusRequest,
) -> std::result::Result<BackupStatusResponse, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/providers/Microsoft.RecoveryServices/locations/{}/backupStatus",
operation_config.base_path(),
subscription_id,
azure_region
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(get::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupStatusResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod feature_support {
use crate::models::*;
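    /// Checks whether the features described by a [`FeatureSupportRequest`]
    /// are supported for Azure VM resources in the given region.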
pub async fn validate(
operation_config: &crate::OperationConfig,
azure_region: &str,
subscription_id: &str,
parameters: &FeatureSupportRequest,
) -> std::result::Result<AzureVmResourceFeatureSupportResponse, validate::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/providers/Microsoft.RecoveryServices/locations/{}/backupValidateFeatures",
operation_config.base_path(),
subscription_id,
azure_region
);
let mut url = url::Url::parse(url_str).map_err(validate::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(validate::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(validate::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(validate::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(validate::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AzureVmResourceFeatureSupportResponse =
serde_json::from_slice(rsp_body).map_err(|source| validate::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(validate::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod validate {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_protection_intent {
use crate::models::*;
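    /// Lists the protection intents registered in the vault, with optional
    /// `$filter` and `$skipToken` query parameters.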
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
skip_token: Option<&str>,
) -> std::result::Result<ProtectionIntentResourceList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupProtectionIntents",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionIntentResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_usage_summaries {
use crate::models::*;
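    /// Lists backup management usage summaries for the vault.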
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
skip_token: Option<&str>,
) -> std::result::Result<BackupManagementUsageList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupUsageSummaries",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupManagementUsageList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_engines {
use crate::models::*;
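    /// Lists the backup engines (backup management servers) registered to the
    /// vault.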
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
skip_token: Option<&str>,
) -> std::result::Result<BackupEngineBaseResourceList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupEngines",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupEngineBaseResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
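    /// Fetches a single backup engine by name; `filter` and `skip_token` are
    /// forwarded as query parameters when provided.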
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
backup_engine_name: &str,
filter: Option<&str>,
skip_token: Option<&str>,
) -> std::result::Result<BackupEngineBaseResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupEngines/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
backup_engine_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupEngineBaseResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protection_container_refresh_operation_results {
use crate::models::*;
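    /// Polls the result of a fabric-level refresh operation via a GET on
    /// `operationResults/{operation_id}`. A 202 maps to `Response::Accepted202`
    /// (typically still in progress) and a 204 to `Response::NoContent204`
    /// (typically completed); no response body is deserialized.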
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
operation_id: &str,
) -> std::result::Result<get::Response, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/operationResults/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
fabric_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(get::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(get::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protectable_containers {
use crate::models::*;
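    /// Lists containers within the fabric that can be registered for protection,
    /// via a GET on the `protectableContainers` endpoint with an optional OData
    /// `$filter`.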
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
filter: Option<&str>,
) -> std::result::Result<ProtectableContainerResourceList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectableContainers",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
fabric_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectableContainerResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protection_containers {
use crate::models::*;
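    /// Fetches details of the protection container `container_name` in the given
    /// fabric, deserializing the 200 response into a `ProtectionContainerResource`.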
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
) -> std::result::Result<ProtectionContainerResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
fabric_name,
container_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionContainerResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
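    /// Registers the container with the vault via a PUT with a JSON body. A 200
    /// carries the resulting `ProtectionContainerResource`; a 202 indicates the
    /// registration was accepted and is being processed asynchronously.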
pub async fn register(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
parameters: &ProtectionContainerResource,
) -> std::result::Result<register::Response, register::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
fabric_name,
container_name
);
let mut url = url::Url::parse(url_str).map_err(register::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(register::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(register::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(register::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(register::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionContainerResource =
serde_json::from_slice(rsp_body).map_err(|source| register::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(register::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(register::Response::Accepted202),
status_code => {
let rsp_body = rsp.body();
Err(register::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod register {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(ProtectionContainerResource),
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
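    /// Unregisters the container via a DELETE. A 202 maps to
    /// `Response::Accepted202` and a 204 to `Response::NoContent204`; neither
    /// carries a deserialized body.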
pub async fn unregister(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
) -> std::result::Result<unregister::Response, unregister::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
fabric_name,
container_name
);
let mut url = url::Url::parse(url_str).map_err(unregister::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(unregister::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(unregister::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(unregister::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(unregister::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(unregister::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(unregister::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod unregister {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
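    /// Triggers an inquiry on the container via a POST to the `inquire` endpoint
    /// with an empty body (explicit `Content-Length: 0`); only a 202 is treated
    /// as success.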
pub async fn inquire(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
filter: Option<&str>,
) -> std::result::Result<(), inquire::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/inquire",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name
        );
let mut url = url::Url::parse(url_str).map_err(inquire::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(inquire::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(inquire::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(inquire::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(inquire::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod inquire {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
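    /// Triggers a discovery of containers in the fabric via a POST to the
    /// fabric-level `refreshContainers` endpoint with an empty body; a 202 is the
    /// only success status.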
pub async fn refresh(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
filter: Option<&str>,
) -> std::result::Result<(), refresh::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/refreshContainers",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
fabric_name
);
let mut url = url::Url::parse(url_str).map_err(refresh::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(refresh::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(refresh::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(refresh::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(refresh::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod refresh {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_workload_items {
use crate::models::*;
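    /// Lists workload items inside the given container via a GET on
    /// `protectionContainers/{container_name}/items`, forwarding the optional
    /// `$filter` and `$skipToken` query parameters.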
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
filter: Option<&str>,
skip_token: Option<&str>,
) -> std::result::Result<WorkloadItemResourceList, list::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/items",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name
        );
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkloadItemResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protection_container_operation_results {
use crate::models::*;
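    /// Fetches the result of a container-level operation. A 200 carries the
    /// updated `ProtectionContainerResource`; 202 and 204 are surfaced as
    /// body-less `Response` variants.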
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
operation_id: &str,
) -> std::result::Result<get::Response, get::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/operationResults/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            operation_id
        );
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionContainerResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(get::Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(get::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(get::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200(ProtectionContainerResource),
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backups {
use crate::models::*;
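    /// Triggers an on-demand backup of the protected item via a POST with a JSON
    /// `BackupRequestResource` body; only a 202 is treated as success and no
    /// response body is read.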
pub async fn trigger(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
parameters: &BackupRequestResource,
) -> std::result::Result<(), trigger::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/backup",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name
        );
let mut url = url::Url::parse(url_str).map_err(trigger::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(trigger::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(trigger::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(trigger::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(trigger::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(trigger::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod trigger {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protected_item_operation_statuses {
use crate::models::*;
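    /// Fetches the status of an operation on a protected item via a GET on
    /// `operationsStatus/{operation_id}`, deserializing the 200 response into an
    /// `OperationStatus`.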
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
operation_id: &str,
) -> std::result::Result<OperationStatus, get::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/operationsStatus/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name,
            operation_id
        );
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationStatus =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod item_level_recovery_connections {
use crate::models::*;
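    /// Provisions instant item recovery for the given recovery point via a POST
    /// with a JSON `IlrRequestResource` body; a 202 is the only success status.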
pub async fn provision(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
recovery_point_id: &str,
parameters: &IlrRequestResource,
) -> std::result::Result<(), provision::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}/provisionInstantItemRecovery",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name,
            recovery_point_id
        );
let mut url = url::Url::parse(url_str).map_err(provision::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(provision::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(provision::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(provision::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(provision::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(provision::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod provision {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
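    /// Revokes a previously provisioned instant item recovery connection via a
    /// POST with an empty body (explicit `Content-Length: 0`); a 202 is the only
    /// success status.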
pub async fn revoke(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
fabric_name: &str,
container_name: &str,
protected_item_name: &str,
recovery_point_id: &str,
) -> std::result::Result<(), revoke::Error> {
let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupFabrics/{}/protectionContainers/{}/protectedItems/{}/recoveryPoints/{}/revokeInstantItemRecovery",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            vault_name,
            fabric_name,
            container_name,
            protected_item_name,
            recovery_point_id
        );
let mut url = url::Url::parse(url_str).map_err(revoke::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(revoke::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(revoke::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(revoke::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::ACCEPTED => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(revoke::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod revoke {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_operation_results {
use crate::models::*;
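    /// Fetches the result of a vault-level backup operation. The response body is
    /// never deserialized; only the 200/202/204 status is surfaced through the
    /// `Response` enum.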
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
operation_id: &str,
) -> std::result::Result<get::Response, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupOperationResults/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(get::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(get::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(get::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_operation_statuses {
use crate::models::*;
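    /// Fetches the status of a vault-level backup operation via a GET on
    /// `backupOperations/{operation_id}`, returning the deserialized
    /// `OperationStatus`.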
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
operation_id: &str,
) -> std::result::Result<OperationStatus, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupOperations/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationStatus =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod protection_policy_operation_statuses {
use crate::models::*;
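    /// Fetches the status of an operation on a backup policy via a GET on
    /// `backupPolicies/{policy_name}/operations/{operation_id}`.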
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
policy_name: &str,
operation_id: &str,
) -> std::result::Result<OperationStatus, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupPolicies/{}/operations/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name,
policy_name,
operation_id
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationStatus =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_protectable_items {
use crate::models::*;
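    /// Lists protectable items across the vault via a GET on the
    /// `backupProtectableItems` endpoint, forwarding the optional `$filter` and
    /// `$skipToken` query parameters.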
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
skip_token: Option<&str>,
) -> std::result::Result<WorkloadProtectableItemResourceList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupProtectableItems",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
if let Some(skip_token) = skip_token {
url.query_pairs_mut().append_pair("$skipToken", skip_token);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: WorkloadProtectableItemResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_protection_containers {
use crate::models::*;
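    /// Lists all protection containers registered to the vault via a GET on the
    /// `backupProtectionContainers` endpoint with an optional `$filter`.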
pub async fn list(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
filter: Option<&str>,
) -> std::result::Result<ProtectionContainerResourceList, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupProtectionContainers",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ProtectionContainerResourceList =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod security_pi_ns {
use crate::models::*;
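    /// Requests a security PIN token from the vault. Despite the `get` name this
    /// is a POST to the `backupSecurityPIN` endpoint with an empty body, and the
    /// 200 response is deserialized into a `TokenInformation`.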
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<TokenInformation, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupSecurityPIN",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: TokenInformation =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod backup_resource_storage_configs {
use crate::models::*;
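    /// Fetches the vault's storage configuration via a GET on
    /// `backupstorageconfig/vaultstorageconfig`, returning the deserialized
    /// `BackupResourceConfigResource`.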
pub async fn get(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<BackupResourceConfigResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupResourceConfigResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(get::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod get {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
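    /// Replaces the vault's storage configuration via a PUT with a JSON body,
    /// returning the updated `BackupResourceConfigResource` on a 200.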
pub async fn update(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
parameters: &BackupResourceConfigResource,
) -> std::result::Result<BackupResourceConfigResource, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: BackupResourceConfigResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod update {
use crate::{models, models::*};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
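    /// Applies a partial update to the vault's storage configuration via a PATCH
    /// with a JSON body; success is a body-less 204, so the function returns `()`.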
pub async fn patch(
operation_config: &crate::OperationConfig,
vault_name: &str,
resource_group_name: &str,
subscription_id: &str,
parameters: &BackupResourceConfigResource,
) -> std::result::Result<(), patch::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/Subscriptions/{}/resourceGroups/{}/providers/Microsoft.RecoveryServices/vaults/{}/backupstorageconfig/vaultstorageconfig",
operation_config.base_path(),
subscription_id,
resource_group_name,
vault_name
);
let mut url = url::Url::parse(url_str).map_err(patch::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(patch::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(patch::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(patch::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(patch::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(patch::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod patch {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod operations {
use crate::models::*;
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<ClientDiscoveryResponse, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/providers/Microsoft.RecoveryServices/operations", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: ClientDiscoveryResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
pub mod list {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Unexpected HTTP status code {}", status_code)]
UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
} | pub mod get { |
layer-list.service.ts | import { Injectable, OnDestroy } from '@angular/core';
import {
Observable,
BehaviorSubject,
from,
fromEventPattern,
merge,
NEVER,
MonoTypeOperatorFunction,
of,
forkJoin
} from 'rxjs';
import { mergeMap, filter, switchMap, scan, take, toArray, withLatestFrom, debounceTime } from 'rxjs/operators';
import { LayerSource } from '@tamu-gisc/common/types';
import { EsriMapService, EsriModuleProviderService, MapServiceInstance } from '@tamu-gisc/maps/esri';
import { EnvironmentService } from '@tamu-gisc/common/ngx/environment';
import esri = __esri;
@Injectable()
export class LayerListService implements OnDestroy {
private _store: BehaviorSubject<LayerListItem<esri.Layer>[]> = new BehaviorSubject([]);
private _scale: BehaviorSubject<number> = new BehaviorSubject(undefined);
private _scaleThrottled: Observable<number>;
private _handles: esri.Handles;
constructor(
private moduleProvider: EsriModuleProviderService,
private mapService: EsriMapService,
private environment: EnvironmentService
) {
const LayerSources = this.environment.value('LayerSources');
this._scaleThrottled = this._scale.asObservable().pipe(debounceTime(250));
forkJoin([from(this.moduleProvider.require(['Handles'])), this.mapService.store]).subscribe(
([[HandlesConstructor], instance]: [[esri.HandlesConstructor], MapServiceInstance]) => {
this._handles = new HandlesConstructor();
// Perform a check against the map instance to add existing layers. Layers added after this
// point will be handled by the change event.
// Create a LayerListItem instance for each including the existing layer instance as a class property.
const existing: LayerListItem<esri.Layer>[] = instance.map.allLayers
.filter((l) => {
// Undefined value means "default" value, which is equal to "show"
return l.listMode === undefined || l.listMode === 'show';
})
.toArray()
.map((l) => {
return new LayerListItem({ layer: l });
});
// Determine layers in layer sources that are listed as show, but are being marked as lazy loaded.
// Create a LayerListItem instance for each, leaving the layer property undefined.
// This will be used as a flag to determine whether a layer needs to be lazy-loaded
const nonExisting: LayerListItem<esri.Layer>[] = LayerSources.filter((s) => {
return (s.listMode === undefined || s.listMode === 'show') && existing.findIndex((el) => s.id === el.id) === -1;
}).map((l) => {
return new LayerListItem(l);
});
// Concatenate the existing (true layer instances) and non existing layers (layer references to be lazy-loaded)
this._store.next([...existing, ...nonExisting]);
// Event handler that listens for layer changes in the map instance
instance.map.allLayers.on('change', (e) => {
// Handle added layers case
if (e.added) {
// Each event only has the layers for that particular event. It does not include layers in
// previous events, so some processing must be done to ensure all layers added are either added
// or updated properly in the service state.
// Create a copy of the service store and update items if the current event has layers that have
// been previously added to the store.
const updatedLayers = this._store.value.map((lyr) => {
const existingIndex = e.added.findIndex((added) => added.id === lyr.id);
if (existingIndex > -1) {
return new LayerListItem({ ...lyr, layer: e.added[existingIndex] });
} else {
return lyr;
}
});
// Layers that are not found in the service should simply be added.
const newLayers = e.added
.filter(
(al) =>
(al.listMode === undefined || al.listMode === 'show') &&
this._store.value.findIndex((cl) => cl.id === al.id) === -1
)
.map((l: esri.Layer) => {
return new LayerListItem({ layer: l });
});
// Concatenate updated and new layers, and set it to be the new store value.
this._store.next([...updatedLayers, ...newLayers]);
}
// Handle removed layers case
if (e.removed && e.removed.length > 0) {
const minusRemoved = this._store.value.filter((l) => {
return e.removed.findIndex((rl) => l.id === rl.id) === -1;
});
this._store.next(minusRemoved);
}
});
instance.view.watch('scale', (scale) => {
this._scale.next(scale);
});
}
);
}
public ngOnDestroy() {
// Clean up all layer handle references.
this._handles.removeAll();
}
/**
* Returns a collection of LayerListItems and emits whenever there is a change (add/removal)
* in map layers. Does not notify on layer property changes, by default.
*
* For the subscriber to receive notifications on property changes, provide a string or an
* array of property paths to the layers to be watched. If the property is valid, whenever
* it changes, it will trigger a subscription event.
*
*/
public layers(props?: ILayerSubscriptionProperties): Observable<LayerListItem<esri.Layer>[]> {
return (
merge(
this._store.pipe(
this.filterLayers(props, true),
switchMap((filtered) => from(filtered)),
mergeMap((item) => {
if (props && props.watchProperties) {
const handleKey = `${item.id}-${
typeof props.watchProperties === 'string'
? props.watchProperties.toString()
: props.watchProperties.join('-')
}`;
/**
* Adds a watch handler to the layer for the property provided if it does not exist yet.
*
* Function gets called when a new layer is added to the map.
*
*/
const add = (handler) => {
if (!this._handles.has(handleKey)) {
const handle = item.layer.watch(props.watchProperties, handler);
this._handles.add(handle, handleKey);
}
};
/**
* Destroys a property watch handler for a layer by handleKey if it exists.
*
* Function executed whenever the source observable is unsubscribed from.
*
*/
const remove = (handler): void => {
if (this._handles.has(handleKey)) {
this._handles.remove(handleKey);
}
};
// For every item, attempt to create a layer
return fromEventPattern(add, remove);
} else {
return NEVER;
}
})
),
this._store,
this._scaleThrottled
)
// Normalize either emission by mapping to the exposed store observable.
.pipe(this.mapLayerChangeEvent(), this.filterLayers(props, false))
);
}
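  // Example subscription (a sketch; assumes a component that injects this service
  // and a layer source whose id is 'basemap'):
  //
  //   this.layerListService
  //     .layers({ layers: 'basemap', watchProperties: 'visible' })
  //     .subscribe((items) => (this.visibleLayers = items));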
/**
* RxJS operator responsible for normalizing and mapping a layer change event emission to a LayerListItem collection.
*
* The `layers` method subscribes to both layer add/removal activity on the map OR layer property watches. Since their respective
* emissions are different, they have to be normalized before any subscribers can process the value.
*
*/
private mapLayerChangeEvent(): MonoTypeOperatorFunction<LayerListItem<esri.Layer>[] | esri.WatchCallback | number> {
return ($input) =>
$input.pipe(
switchMap((collection) => {
// Check if the array is a collection of LayerListItem
// One of two conditions must be met:
//
// Collection is of length zero, which means there are no layers added to the map yet. We still want an emission out
// of an empty LayerListItem collection. In addition, if the collection is empty it cannot be an esri WatchCallback because
// the length of that array is known.
if (
collection instanceof Array &&
(<LayerListItem<esri.Layer>[]>collection).some((i) => i instanceof LayerListItem)
) {
return of(collection);
} else if (typeof collection === 'number' || collection === undefined) {
return this._store.asObservable().pipe(take(1));
} else {
// If the collection is not a list of LayerListItem, then it is a collection with esri WatchCallback values.
//
// To avoid multiple emissions for every layer despite having a single WatchHandle event, limit the LayerListItem
// collection to be only the affected LayerListItems
return this._store.asObservable().pipe(
take(1),
switchMap((list) => from(list)),
filter((item) => {
// Item at index 3 is the `target` layer object reference that contains an id property.
return item.id === collection[3].id;
}),
toArray()
);
}
})
);
}
/**
* RxJS operator that filters a collection of LayerListItem's
*
* @param {ILayerSubscriptionProperties} props The `layers` property is used from this object
* to reduce the original collection.
* @param {boolean} filterLazy If `true` will ignore lazy load layers during the filtering. This is used internally
* to prevent errors and unnecessary layer watch handles from being created. When `false`, all layers will be processed,
* and will not filter out layers marked for lazy loading which is useful to get a full list of layers for UI presentation,
* for example.
*/
private filterLayers(
props: ILayerSubscriptionProperties,
filterLazy: boolean
): MonoTypeOperatorFunction<Array<LayerListItem<esri.FeatureLayer | esri.GraphicsLayer>>> {
return (input$) =>
input$.pipe(
switchMap((list) => {
return from(list);
}),
withLatestFrom(this._scale),
filter(([item, scale]) => {
if (item.layer === undefined && filterLazy) {
return false;
}
// Since the item value can be an object or a boolean, only perform the following operations if `item`
// is an object type
if (item instanceof Object) {
if (
(scale !== undefined && item.layer && item.layer.maxScale !== 0 && item.layer.maxScale >= scale) ||
(scale !== undefined && item.layer && item.layer.minScale !== 0 && item.layer.minScale <= scale)
) {
item.outsideExtent = true;
} else {
item.outsideExtent = false;
}
}
if (props === undefined || props.layers === undefined) {
return true;
}
if (typeof props.layers === 'string') {
return props.layers === item.id;
} else if (props.layers instanceof Array) {
return props.layers.includes(item.id);
} else {
throw new Error(`Unexpected input parameter: ${JSON.stringify(props.layers)}`);
}
}),
// Since the source observable (service store), never completes a simple toArray() will not work here.
//
// It's possible to achieve the same end-result with the scan operator that collects stream emissions over time.
withLatestFrom(this._store),
scan((acc, [[layer], store]) => {
// Diff the accumulated layers with the store and remove anything from the accumulated that doesn't exist in store.
acc = store.map((sl) => acc.find((al) => al.id === sl.id)).filter((l) => l !== undefined);
// Check if the accumulated value contains the current layer by id
const existingIndex = acc.findIndex((l) => l.id === layer.id);
// If the existing index already has a defined layer, return the current accumulated value
// Since layers will always be references, their state value is handled by the API.
// We only need to make sure that our LayerListItem has the layer definition.
if (existingIndex > -1 && acc[existingIndex].layer !== undefined) {
return acc;
}
// If it does exist, and we reached this block it means the existingIndex LayerListItem does not have a layer
// reference. In that case, apply the reference.
//
// If the existingIndex is out of bounds (-1), then the LayerListItem does not exist in the accumulator. In this case, add it.
if (existingIndex > -1 && acc[existingIndex].layer === undefined) {
acc.splice(existingIndex, 1, layer);
return [...acc];
} else {
return [...acc, layer];
}
}, [])
);
}
}
export class LayerListItem<T extends esri.Layer> {
public id: LayerSource['id'];
public title: LayerSource['title'];
public layer: T;
public category: LayerSource['category'];
public outsideExtent = false;
constructor(props: { id?: string; title?: string; layer?: T; category?: string }) {
this.layer = props.layer;
// If a layer is provided, inherit layer properties, else set
if (props.layer) {
this.id = props.layer.id;
this.title = props.layer.title;
this.category = props.category;
} else {
this.id = props.id || '';
this.title = props.title || '';
this.category = props.category;
}
}
}
export interface LayerListStore<T extends esri.Layer> {
categories: LayerListCategory<T>[];
}
export interface LayerListCategory<T extends esri.Layer> {
title: string;
layers: LayerListItem<T>[];
expanded: boolean;
}
export interface ILayerSubscriptionProperties {
/**
* Layer ID or ID's that will be returned with the subscription.
*
* Will default to return all layers.
*/
layers?: string | string[];
/**
* List of layer properties that trigger a state emission.
*/
watchProperties?: string | string[];
}
loader.js | const path = require('path');
const loaderUtils = require('loader-utils');
function install(content, icons) {
if (icons.length) {
let newContent = '/* icons-loader */\n';
newContent += `import installIcons from ${loaderUtils.stringifyRequest(
this,
'!' + require.resolve('./runtime.js')
)}\n`;
newContent += `import {${icons.join(
','
)}} from ${loaderUtils.stringifyRequest(
this,
'!' + require.resolve('@mdi/js')
)};\n`;
newContent += `installIcons(component, {${icons.join(',')}})\n`;
// Insert our modification before the HMR code
const hotReload = content.indexOf('/* hot reload */');
if (hotReload > -1) {
content =
content.slice(0, hotReload) +
newContent +
'\n\n' +
content.slice(hotReload);
} else {
content += '\n\n' + newContent;
}
}
return content;
}
module.exports = async function (content, sourceMap) {
this.async();
this.cacheable();
if (!this.resourceQuery) {
const readFile = (path) =>
new Promise((resolve, reject) => {
this.fs.readFile(path, function (err, data) {
if (err) reject(err);
else resolve(data);
});
});
this.addDependency(this.resourcePath);
const file = (await readFile(this.resourcePath)).toString('utf8');
const icons = Array.from(
new Set(Array.from(file.matchAll(/\$icons\.(mdi\w+)/g), (m) => m[1]))
);
content = install.call(this, content, icons);
}
this.callback(null, content, sourceMap);
};
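
// One possible webpack wiring (an assumption -- adapt to your build): list this
// loader before vue-loader so it runs on vue-loader's compiled output, e.g.
//
//   { test: /\.vue$/, use: ['icons-loader', 'vue-loader'] }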
Vreceive.py | #!/ufs/guido/bin/sgi/python
# Receive live video UDP packets.
# Usage: Vreceive [-m mcastgrp] [-p port] [-c type]
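# Example invocation (a sketch; the group, port and type must match the sender):
#   Vreceive -m 225.0.0.250 -p 5555 -c rgb8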
import sys
import struct
from socket import * # syscalls and support functions
from SOCKET import * # <sys/socket.h>
from IN import * # <netinet/in.h>
import select
import string
import gl, GL, DEVICE
sys.path.append('/ufs/guido/src/video')
import LiveVideoOut
import regsub
import getopt
from senddefs import *
# Print usage message and exit(2).
def usage(msg):
print msg
print 'usage: Vreceive [-m mcastgrp] [-p port] [-c type]'
print '-m mcastgrp: multicast group (default ' + `DEFMCAST` + ')'
print '-p port : port (default ' + `DEFPORT` + ')'
print '-c type : signal type: rgb8, grey or mono (default rgb8)'
sys.exit(2)
# Main program: parse options and main loop.
def main():
    sys.stdout = sys.stderr

    group = DEFMCAST
    port = DEFPORT
    width = DEFWIDTH
    height = DEFHEIGHT
    vtype = 'rgb8'

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'm:p:c:')
    except getopt.error, msg:
        usage(msg)

    try:
        for opt, optarg in opts:
            if opt == '-p':
                port = string.atoi(optarg)
            if opt == '-m':
                group = gethostbyname(optarg)
            if opt == '-c':
                vtype = optarg
    except string.atoi_error, msg:
        usage('bad integer: ' + msg)

    s = opensocket(group, port)

    gl.foreground()
    gl.prefsize(width, height)
    wid = gl.winopen('Vreceive')
    gl.winconstraints()
    gl.qdevice(DEVICE.ESCKEY)
    gl.qdevice(DEVICE.WINSHUT)
    gl.qdevice(DEVICE.WINQUIT)

    lvo = LiveVideoOut.LiveVideoOut(wid, width, height, vtype)

    ifdlist = [gl.qgetfd(), s.fileno()]
    ofdlist = []
    xfdlist = []
    timeout = 1.0
    selectargs = (ifdlist, ofdlist, xfdlist, timeout)

    while 1:
        if gl.qtest():
            dev, val = gl.qread()
            if dev in (DEVICE.ESCKEY, \
                    DEVICE.WINSHUT, DEVICE.WINQUIT):
                break
            if dev == DEVICE.REDRAW:
                lvo.reshapewindow()
        elif s.avail():
            data = s.recv(16*1024)
            pos, w, h = struct.unpack('hhh', data[:6])
            if (w, h) <> (width, height):
                x, y = gl.getorigin()
                y = y + height - h
                gl.winposition(x, x+w-1, y, y+h-1)
                width, height = w, h
                lvo.resizevideo(width, height)
            lvo.putnextpacket(pos, data[6:])
        else:
            x = select.select(selectargs)

    lvo.close()

# Subroutine to create and properly initialize the receiving socket
def opensocket(group, port):
# Create the socket
s = socket(AF_INET, SOCK_DGRAM)
# Allow multiple copies of this program on one machine
s.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) # (Not strictly needed)
# Bind the port to it
s.bind('', port)
# Look up the group once
group = gethostbyname(group)
# Construct binary group address
group_bytes = eval(regsub.gsub('\.', ',', group))
grpaddr = 0
for byte in group_bytes: grpaddr = (grpaddr << 8) | byte
# Construct struct mreq from grpaddr and ifaddr
ifaddr = INADDR_ANY
mreq = struct.pack('ll', grpaddr, ifaddr)
# Add group membership
s.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
return s
main()
vector.rs | use std::marker::PhantomData;
use std::slice;
use libc::{c_int, c_double};
use crate::ffi::*;
pub struct Vector<'a> {
ptr: *mut SwsVector,
_own: bool,
_marker: PhantomData<&'a ()>,
}

impl<'a> Vector<'a> {
    pub unsafe fn wrap(ptr: *mut SwsVector) -> Self {
        Vector { ptr: ptr, _own: false, _marker: PhantomData }
}
pub unsafe fn as_ptr(&self) -> *const SwsVector {
self.ptr as *const _
}
pub unsafe fn as_mut_ptr(&mut self) -> *mut SwsVector {
self.ptr
}
}
impl<'a> Vector<'a> {
pub fn new(length: usize) -> Self {
unsafe {
Vector { ptr: sws_allocVec(length as c_int), _own: true, _marker: PhantomData }
}
}
pub fn gaussian(variance: f64, quality: f64) -> Self {
unsafe {
Vector { ptr: sws_getGaussianVec(variance as c_double, quality as c_double), _own: true, _marker: PhantomData }
}
}
pub fn value(value: f64, length: usize) -> Self {
unsafe {
Vector { ptr: sws_getConstVec(value as c_double, length as c_int), _own: true, _marker: PhantomData }
}
}
pub fn identity() -> Self {
unsafe {
Vector { ptr: sws_getIdentityVec(), _own: true, _marker: PhantomData }
}
}
pub fn scale(&mut self, scalar: f64) {
unsafe {
sws_scaleVec(self.as_mut_ptr(), scalar as c_double);
}
}
pub fn normalize(&mut self, height: f64) {
unsafe {
sws_normalizeVec(self.as_mut_ptr(), height as c_double);
}
}
pub fn conv(&mut self, other: &Vector) {
unsafe {
sws_convVec(self.as_mut_ptr(), other.as_ptr() as *mut _);
}
}
pub fn add(&mut self, other: &Vector) {
unsafe {
sws_addVec(self.as_mut_ptr(), other.as_ptr() as *mut _);
}
}
pub fn sub(&mut self, other: &Vector) {
unsafe {
sws_subVec(self.as_mut_ptr(), other.as_ptr() as *mut _);
}
}
pub fn shift(&mut self, value: usize) {
unsafe {
sws_shiftVec(self.as_mut_ptr(), value as c_int);
}
}
pub fn coefficients(&self) -> &[f64] {
unsafe {
slice::from_raw_parts((*self.as_ptr()).coeff,
(*self.as_ptr()).length as usize)
}
}
pub fn coefficients_mut(&self) -> &[f64] {
unsafe {
slice::from_raw_parts_mut((*self.as_ptr()).coeff,
(*self.as_ptr()).length as usize)
}
}
}
impl<'a> Clone for Vector<'a> {
fn clone(&self) -> Self {
unsafe {
Vector { ptr: sws_cloneVec(self.as_ptr() as *mut _), _own: true, _marker: PhantomData }
}
}
}
impl<'a> Drop for Vector<'a> {
fn drop(&mut self) {
unsafe {
if self._own {
sws_freeVec(self.as_mut_ptr());
}
}
}
}
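
// Example (a sketch): build a Gaussian kernel, normalize its taps, and read the
// coefficients back out.
//
//     let mut v = Vector::gaussian(1.5, 3.0);
//     v.normalize(1.0);
//     let taps = v.coefficients();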
objiter.rs | /*
* Various types to support iteration.
*/
use super::super::pyobject::{
AttributeProtocol, PyContext, PyFuncArgs, PyObjectKind, PyObjectRef, PyResult, TypeProtocol,
};
use super::super::vm::VirtualMachine;
use super::objbool;
// use super::objstr;
use super::objtype; // Required for arg_check! to use isinstance
/*
* This helper function is called at multiple places. First, it is called
* in the vm when a for loop is entered. Next, it is used when the builtin
* function 'iter' is called.
*/
pub fn get_iter(vm: &mut VirtualMachine, iter_target: &PyObjectRef) -> PyResult {
vm.call_method(iter_target, "__iter__", vec![])
// let type_str = objstr::get_value(&vm.to_str(iter_target.typ()).unwrap());
// let type_error = vm.new_type_error(format!("Cannot iterate over {}", type_str));
// return Err(type_error);
}
/*
* Helper function to retrieve the next object (or none) from an iterator.
*/
pub fn get_next_object(
vm: &mut VirtualMachine,
iter_obj: &PyObjectRef,
) -> Result<Option<PyObjectRef>, PyObjectRef> {
let next_obj: PyResult = vm.call_method(iter_obj, "__next__", vec![]);
match next_obj {
Ok(value) => Ok(Some(value)),
Err(next_error) => {
// Check if we have stopiteration, or something else:
if objtype::isinstance(&next_error, &vm.ctx.exceptions.stop_iteration) {
Ok(None)
} else {
Err(next_error)
}
}
}
}
/* Retrieve all elements from an iterator */
pub fn get_all(
vm: &mut VirtualMachine,
iter_obj: &PyObjectRef,
) -> Result<Vec<PyObjectRef>, PyObjectRef> {
let mut elements = vec![];
loop {
let element = get_next_object(vm, iter_obj)?;
match element {
Some(v) => elements.push(v),
None => break,
}
}
Ok(elements)
}
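
// A sketch of driving the helpers above together (assumes a `vm` and an iterable):
//
//     let iter_obj = get_iter(vm, &iterable)?;
//     let elements = get_all(vm, &iter_obj)?;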
// Sequence iterator:
fn iter_new(vm: &mut VirtualMachine, args: PyFuncArgs) -> PyResult {
arg_check!(vm, args, required = [(iter_target, None)]);
get_iter(vm, iter_target)
}
fn iter_iter(vm: &mut VirtualMachine, args: PyFuncArgs) -> PyResult {
arg_check!(vm, args, required = [(iter, Some(vm.ctx.iter_type()))]);
// Return self:
Ok(iter.clone())
}
fn iter_contains(vm: &mut VirtualMachine, args: PyFuncArgs) -> PyResult {
arg_check!(
vm,
args,
required = [(iter, Some(vm.ctx.iter_type())), (needle, None)]
);
loop {
match vm.call_method(&iter, "__next__", vec![]) {
Ok(element) => match vm.call_method(needle, "__eq__", vec![element.clone()]) {
Ok(value) => {
if objbool::get_value(&value) {
return Ok(vm.new_bool(true));
} else {
continue;
}
}
Err(_) => return Err(vm.new_type_error("".to_string())),
},
Err(_) => return Ok(vm.new_bool(false)),
}
}
}
fn iter_next(vm: &mut VirtualMachine, args: PyFuncArgs) -> PyResult {
arg_check!(vm, args, required = [(iter, Some(vm.ctx.iter_type()))]);
if let PyObjectKind::Iterator {
ref mut position,
iterated_obj: ref iterated_obj_ref,
} = iter.borrow_mut().kind
{
let iterated_obj = &*iterated_obj_ref.borrow_mut();
match iterated_obj.kind {
PyObjectKind::Sequence { ref elements } => {
if *position < elements.len() {
let obj_ref = elements[*position].clone();
*position += 1;
Ok(obj_ref)
} else {
let stop_iteration_type = vm.ctx.exceptions.stop_iteration.clone();
let stop_iteration =
vm.new_exception(stop_iteration_type, "End of iterator".to_string());
Err(stop_iteration)
}
}
_ => {
panic!("NOT IMPL");
}
}
} else {
panic!("NOT IMPL");
}
}
pub fn init(context: &PyContext) {
let ref iter_type = context.iter_type;
iter_type.set_attr("__contains__", context.new_rustfunc(iter_contains));
iter_type.set_attr("__iter__", context.new_rustfunc(iter_iter));
iter_type.set_attr("__new__", context.new_rustfunc(iter_new));
iter_type.set_attr("__next__", context.new_rustfunc(iter_next));
}
chacha20.rs | //! Chacha20 encryption functions
//!
//! [Official documentation](https://monocypher.org/manual/advanced/chacha20)
use ffi;
use std::mem;
pub struct Context(ffi::crypto_chacha_ctx);
/// These functions provide an incremental interface for the Chacha20 encryption primitive.
///
/// # Example
///
/// ```
/// use monocypher::chacha20::Context;
/// use monocypher::utils::wipe;
///
/// let mut key: [u8; 32] = [
/// 171, 107, 219, 186, 0, 173, 209, 50, 252, 77, 93, 85, 99, 106, 222, 162, 122, 140, 150,
/// 228, 61, 93, 186, 251, 45, 23, 222, 14, 121, 172, 147, 241,
/// ];
/// let nonce: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 1];
///
/// let mut ctx = Context::new(&key, nonce);
/// let mut ctx2 = Context::new(&key, nonce);
/// let ciphertext = ctx.encrypt("test".as_bytes());
/// let plaintext = ctx2.decrypt(&ciphertext);
///
/// wipe(&mut key);
///
/// assert_eq!(&plaintext, &"test".as_bytes())
/// ```
impl Context {
/// Initialises a new context with the given key and nonce.
/// Uses an 8-byte nonce, which is too small to be selected at random.
/// Use a counter.
#[inline]
pub fn new(key: &[u8], nonce: [u8; 8]) -> Context {
unsafe {
let mut ctx = mem::MaybeUninit::<ffi::crypto_chacha_ctx>::uninit();
ffi::crypto_chacha20_init(ctx.as_mut_ptr() as *mut ffi::crypto_chacha_ctx, key.as_ptr(), nonce.as_ptr());
Context(ctx.assume_init())
}
}
/// Initialises a new context with the given key and nonce.
/// Uses a 24-byte nonce, which is big enough to be selected at random.
    /// Use your operating system to generate cryptographically secure random numbers.
    /// Read about random number generators in the [documentation](https://monocypher.org/manual/).
#[inline]
pub fn new_x(key: &[u8], nonce: [u8; 24]) -> Context {
unsafe {
let mut ctx = mem::MaybeUninit::<ffi::crypto_chacha_ctx>::uninit();
ffi::crypto_chacha20_x_init(ctx.as_mut_ptr() as *mut ffi::crypto_chacha_ctx, key.as_ptr(), nonce.as_ptr());
Context(ctx.assume_init())
}
}
/// Encrypts the given plaintext.
#[inline]
pub fn encrypt(&mut self, plaintext: &[u8]) -> Vec<u8> {
let mut cipher_text = vec![0u8; plaintext.len()];
unsafe {
ffi::crypto_chacha20_encrypt(
&mut self.0,
cipher_text.as_mut_ptr(),
plaintext.as_ptr(),
plaintext.len(),
);
cipher_text
}
}
/// Decrypts the given ciphertext.
#[inline]
pub fn decrypt(&mut self, ciphertext: &[u8]) -> Vec<u8> {
self.encrypt(ciphertext)
}
    /// Same as encrypt but with the plaintext being NULL.
    /// Useful as a non-cryptographic user-space random number generator.
#[inline]
pub fn stream(&mut self, stream: &mut [u8]) {
unsafe {
ffi::crypto_chacha20_stream(&mut self.0, stream.as_mut_ptr(), stream.len());
}
}
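    // Example (sketch): fill a buffer with keystream bytes, e.g. for the
    // non-cryptographic RNG use mentioned above.
    //
    //     let mut buf = [0u8; 16];
    //     ctx.stream(&mut buf);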
/// Resets the internal counter of the context to the given number.
/// Resuming the encryption will use the stream at the block number.
/// May be used to en/decrypt part of a long message.
/// Can also be used to implement AEAD constructions like the ones
/// explained in [RFC 7539](https://tools.ietf.org/html/rfc7539).
#[inline]
pub fn chacha20_set_ctr(&mut self, ctr: u64) {
unsafe {
ffi::crypto_chacha20_set_ctr(&mut self.0, ctr);
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn new() {
let key: [u8; 32] = [
171, 107, 219, 186, 0, 173, 209, 50, 252, 77, 93, 85, 99, 106, 222, 162, 122, 140, 150,
228, 61, 93, 186, 251, 45, 23, 222, 14, 121, 172, 147, 241,
];
let nonce: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 1];
let mut ctx = Context::new(&key, nonce);
let mut ctx2 = Context::new(&key, nonce);
let ciphertext = ctx.encrypt("test".as_bytes());
let plaintext = ctx2.decrypt(&ciphertext);
assert_eq!(&plaintext, &"test".as_bytes())
}
#[test]
fn new_wrong_nonce() {
let key: [u8; 32] = [
171, 107, 219, 186, 0, 173, 209, 50, 252, 77, 93, 85, 99, 106, 222, 162, 122, 140, 150,
228, 61, 93, 186, 251, 45, 23, 222, 14, 121, 172, 147, 241,
];
let nonce: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 1];
let nonce2: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 8];
let mut ctx = Context::new(&key, nonce);
        let mut ctx2 = Context::new(&key, nonce2);

        let ciphertext = ctx.encrypt("test".as_bytes());
        let plaintext = ctx2.decrypt(&ciphertext);

        assert_ne!(&plaintext, &"test".as_bytes())
}
#[test]
fn new_x() {
let key: [u8; 32] = [
171, 107, 219, 186, 0, 173, 209, 50, 252, 77, 93, 85, 99, 106, 222, 162, 122, 140, 150,
228, 61, 93, 186, 251, 45, 23, 222, 14, 121, 172, 147, 241,
];
let nonce = [1u8; 24];
let mut ctx = Context::new_x(&key, nonce);
let mut ctx2 = Context::new_x(&key, nonce);
let ciphertext = ctx.encrypt("test".as_bytes());
let plaintext = ctx2.decrypt(&ciphertext);
assert_eq!(&plaintext, &"test".as_bytes())
}
#[test]
fn stream() {
let key: [u8; 32] = [
171, 107, 219, 186, 0, 173, 209, 50, 252, 77, 93, 85, 99, 106, 222, 162, 122, 140, 150,
228, 61, 93, 186, 251, 45, 23, 222, 14, 121, 172, 147, 241,
];
let nonce: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 1];
let mut ctx = Context::new(&key, nonce);
let mut v: Vec<u8> = vec![0, 0, 0, 0];
ctx.stream(& mut v);
assert_ne!(v, vec![0, 0, 0, 0])
}
#[test]
fn ctx() {
let key: [u8; 32] = [
171, 107, 219, 186, 0, 173, 209, 50, 252, 77, 93, 85, 99, 106, 222, 162, 122, 140, 150,
228, 61, 93, 186, 251, 45, 23, 222, 14, 121, 172, 147, 241,
];
let nonce: [u8; 24] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1];
let nonce2: [u8; 24] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2];
let mut ctx = Context::new_x(&key, nonce);
let mut ctx2 = Context::new_x(&key, nonce2);
let ciphertext = ctx.encrypt("test".as_bytes());
ctx2.chacha20_set_ctr(1);
let plaintext = ctx2.decrypt(&ciphertext);
assert_ne!(&plaintext, &"test".as_bytes())
}
}
stringutils.go | package stringutils

import (
	"strings"
)

// MakeLines joins multiple strings with a newline character.
func MakeLines(lines ...string) string {
	return strings.Join(lines, "\n")
}
Paper.js | import React from 'react';
import PropTypes from 'prop-types';
import {withStyles} from '@material-ui/core/styles';
import Grid from '@material-ui/core/Grid';
import Paper from '@material-ui/core/Paper';
import Typography from '@material-ui/core/Typography';
import AppBar from '@material-ui/core/AppBar';
import Button from '@material-ui/core/Button';
export default class Test extends React.Component {
constructor() {
super();
this.state = {
items: []
};
}
componentDidMount() {
this.reloadData();
}
reloadData() {
fetch('http://localhost:3000/score/users').then(function(response) {
return response.json();
}).then(myJson => {
      this.setState({
        items: myJson.users
      });
    });
  }
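  // reloadData() can also be invoked again later (e.g. from a button handler) to
  // refresh this.state.items from the same endpoint.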
render() {
var style = {
Paper: {
padding: 10,
marginTop: 10,
marginBottom: 10,
marginLeft: 20,
marginRight: 20
},
Button: {
marginLeft: 20,
flex: 1
},
root: {
flexGrow: 1
}
};
return(
<div> <Grid container spacing = { 24 } style = {style.root}>
<Grid item xs> {this.state.items.map(hello =>
          <Paper key = {hello.idUser} style = {style.Paper}>{hello.firstName} {hello.lastName} <Button style = {style.Button} variant = 'outlined'> action </Button> </Paper> )
} </Grid>
</Grid>
      </div>
);
}
}
patchStyleOptions.js | import defaultStyleOptions from './defaultStyleOptions';
// TODO: [P4] We should add a notice for people who want to use "styleSet" instead of "styleOptions".
// "styleSet" is actually CSS stylesheet and it is based on the DOM tree.
// DOM tree may change from time to time, thus, maintaining "styleSet" becomes a constant effort.
// eslint-disable-next-line complexity
export default function patchStyleOptions(
options,
{ groupTimestamp: groupTimestampFromProps, sendTimeout: sendTimeoutFromProps }
) {
const patchedOptions = { ...defaultStyleOptions, ...options };
// Keep this list flat (no nested style) and serializable (no functions)
// TODO: [P4] Deprecate this code after bump to v5
const { bubbleFromUserNubOffset, bubbleNubOffset, emojiSet } = patchedOptions;
if (bubbleFromUserNubOffset === 'top') {
patchedOptions.bubbleFromUserNubOffset = 0;
} else if (typeof bubbleFromUserNubOffset !== 'number') {
patchedOptions.bubbleFromUserNubOffset = -0;
}
if (bubbleNubOffset === 'top') {
patchedOptions.bubbleNubOffset = 0;
} else if (typeof bubbleNubOffset !== 'number') {
patchedOptions.bubbleNubOffset = -0;
}
if (emojiSet === true) {
patchedOptions.emojiSet = {
':)': '😊',
':-)': '😊',
'(:': '😊',
'(-:': '😊',
':-|': '😐',
':|': '😐',
':-(': '☹️',
':(': '☹️',
':-D': '😀',
':D': '😀',
':-p': '😛',
':p': '😛',
':-P': '😛',
':P': '😛',
':-o': '😲',
':o': '😲',
':O': '😲',
':-O': '😲',
':-0': '😲',
':0': '😲',
';-)': '😉',
';)': '😉',
'<3': '❤️',
'</3': '💔',
'<\\3': '💔'
};
} else if (Object.prototype.toString.call(patchedOptions.emojiSet) !== '[object Object]') {
console.warn('botframework-webchat: emojiSet must be a boolean or an object with emoticon: emojiValues');
patchedOptions.emojiSet = false;
}
if (typeof groupTimestampFromProps !== 'undefined' && typeof options.groupTimestamp === 'undefined') {
console.warn(
'Web Chat: "groupTimestamp" has been moved to "styleOptions". This deprecation migration will be removed on or after January 1 2022.'
);
patchedOptions.groupTimestamp = groupTimestampFromProps;
}
if (typeof sendTimeoutFromProps !== 'undefined' && typeof options.sendTimeout === 'undefined') {
console.warn(
'Web Chat: "sendTimeout" has been moved to "styleOptions". This deprecation migration will be removed on or after January 1 2022.'
);
patchedOptions.sendTimeout = sendTimeoutFromProps;
}
  if (patchedOptions.slowConnectionAfter < 0) {
    console.warn('Web Chat: "slowConnectionAfter" cannot be negative, will set to 0.');
    patchedOptions.slowConnectionAfter = 0;
  }

  return patchedOptions;
}
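
// Usage sketch (assumed caller; Web Chat normally calls this internally):
//
//   const styleOptions = patchStyleOptions({ bubbleNubOffset: 'top' }, {});
//   // styleOptions.bubbleNubOffset === 0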
main.rs | //! A good way of displaying an SVG image in egui.
//!
//! Requires the dependency `egui_extras` with the `svg` feature.
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] // hide console window on Windows in release
use eframe::egui;
fn main() {
let options = eframe::NativeOptions {
initial_window_size: Some(egui::vec2(1000.0, 700.0)),
..Default::default()
};
eframe::run_native(
"svg example",
options,
Box::new(|_cc| Box::new(MyApp::default())),
);
}
struct MyApp {
svg_image: egui_extras::RetainedImage,
}
impl Default for MyApp {
fn default() -> Self {
Self {
svg_image: egui_extras::RetainedImage::from_svg_bytes(
"rustacean-flat-happy.svg",
include_bytes!("rustacean-flat-happy.svg"),
)
.unwrap(),
}
}
}
impl eframe::App for MyApp {
    fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) {
egui::CentralPanel::default().show(ctx, |ui| {
ui.heading("SVG example");
ui.label("The SVG is rasterized and displayed as a texture.");
ui.separator();
let max_size = ui.available_size();
self.svg_image.show_max_size(ui, max_size);
});
}
}
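
// Pointing the example at a different asset is a one-line change (a sketch;
// "logo.svg" is a placeholder path):
//
//     egui_extras::RetainedImage::from_svg_bytes("logo.svg", include_bytes!("logo.svg"))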
test_rope.rs | use bytes::Rope;
use bytes::traits::*;
use super::gen_bytes;
use crates_unittest::test_case;
use std::prelude::v1::*;
const TEST_BYTES_1: &'static [u8] =
b"dblm4ng7jp4v9rdn1w6hhssmluoqrrrqj59rccl9
nkv2tm1t2da4jyku51ge7f8hv581gkki8lekmf5f
1l44whp4aiwbvhkziw02292on4noyvuwjzsloqyc
5n0iyn4l6o6tgjhlek00mynfzb1wgcwj4mqp6zdr
3625yy7rj7xuisal7b1a7xgq271abvt5ssxuj39v
njtetokxxrgxzp7ik9adnypkmmcn4270yv9l46m7
9mu2zmqmkxdmgia210vkdytb7ywfcyt2bvcsg9eq
5yqizxl6888zrksvaxhzs2v355jxu8gr21m33t83
qvoian1ra7c6pvxabshgngldxa408p18l1fdet2h";
const TEST_BYTES_2: &'static [u8] =
b"jmh14t79mllzj1ohxfj6fun7idwbks8oh35f83g6
ryaowe86mmou5t1xa91uyg8e95wcu5mje1mswien
tt4clgj029cw0pyuvfbvsgzdg1x7sr9qsjkf2b1t
h43smgp1ea22lph17f78cel0cc2kjoht5281xuy8
0ex9uaqwj4330jrp30stsk15j9bpqezu3w78ktit
ev5g6xsngr35q7pemdm9hihf0ebrw5fbwhm530lo
e0zyj1bm7yfyk7f2i45jhr3wu3bvb4hj8jve6db0
iewmr9weecaon9vdnqo5hen9iaiox5vsaxuo461m
8336ugp20u4sfky3kfawr0ome1tiqyx8chkerrjh
a95s0gypcsgo9jqxasqkoj08t4uq5moxmay5plg5
tlh6f9omhn0ezvi0w2n8hx7n6qk7rn1s3mjpnpl6
hvilp8awaa4tvsis66q4e5b3xwy2z1h2klpa87h7";
#[test_case]
pub fn test_rope_round_trip() {
let rope = Rope::from_slice(b"zomg");
assert_eq!(4, rope.len());
let mut dst = vec![];
rope.buf().read(&mut dst).unwrap();
assert_eq!(b"zomg", &dst[..]);
}
#[test_case]
pub fn test_rope_slice() {
let mut dst = vec![];
let bytes = Rope::from_slice(TEST_BYTES_1);
assert_eq!(TEST_BYTES_1.len(), bytes.len());
bytes.buf().read(&mut dst).unwrap();
assert_eq!(dst, TEST_BYTES_1);
let left = bytes.slice_to(250);
assert_eq!(250, left.len());
left.buf().read(&mut dst).unwrap();
assert_eq!(dst, &TEST_BYTES_1[..250]);
let right = bytes.slice_from(250);
assert_eq!(TEST_BYTES_1.len() - 250, right.len());
right.buf().read(&mut dst).unwrap();
assert_eq!(dst, &TEST_BYTES_1[250..]);
}
#[test_case]
pub fn test_rope_concat_two_byte_str() {
let mut dst = vec![];
let left = Rope::from_slice(TEST_BYTES_1);
    let right = Rope::from_slice(TEST_BYTES_2);
let both = left.concat(&right);
assert_eq!(both.len(), TEST_BYTES_1.len() + TEST_BYTES_2.len());
both.buf().read(&mut dst).unwrap();
let mut expected = Vec::new();
expected.extend(TEST_BYTES_1.iter().cloned());
expected.extend(TEST_BYTES_2.iter().cloned());
assert_eq!(dst, expected);
}
// #[test_case]
// #[ignore]
// pub fn test_slice_parity() {
// let bytes = gen_bytes(2048 * 1024);
// let start = 512 * 1024 - 3333;
// let end = 512 * 1024 + 7777;
// let _ = Rope::from_slice(&bytes).slice(start, end);
// // stuff
// }
#[test_case]
pub fn test_rope_equality() {
let a = &b"Mary had a little lamb, its fleece was white as snow; ".to_bytes()
.concat(&b"And everywhere that Mary went, the lamb was sure to go.".to_bytes());
let b = &b"Mary had a little lamb, ".to_bytes()
.concat(&b"its fleece was white as snow; ".to_bytes())
.concat(
&b"And everywhere that Mary went, ".to_bytes()
.concat(&b"the lamb was sure to go.".to_bytes()));
assert_eq!(a, b);
}
role_assignment_properties_with_scope_py3.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RoleAssignmentPropertiesWithScope(Model):
"""Role assignment properties with scope.
:param scope: The role assignment scope.
:type scope: str
:param role_definition_id: The role definition ID.
:type role_definition_id: str
:param principal_id: The principal ID.
:type principal_id: str
"""
_attribute_map = {
'scope': {'key': 'scope', 'type': 'str'},
'role_definition_id': {'key': 'roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(self, *, scope: str=None, role_definition_id: str=None, principal_id: str=None, **kwargs) -> None:
super(RoleAssignmentPropertiesWithScope, self).__init__(**kwargs)
        self.scope = scope
        self.role_definition_id = role_definition_id
        self.principal_id = principal_id
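
# Example (sketch):
#
#   props = RoleAssignmentPropertiesWithScope(
#       scope='/subscriptions/<sub-id>',
#       role_definition_id='<role-definition-id>',
#       principal_id='<principal-object-id>')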
check_mpi.py | import os
import distutils.spawn
import mpi4py
from mpi4py import MPI


def check_mpi():
    mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
    for executable, path in mpi4py.get_config().items():
        if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
            continue
        if mpiexec_path not in path:
            raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
    if 'Open MPI' not in MPI.get_vendor():
        raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
    vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
    if vendor_number not in mpiexec_path:
        raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))

check_mpi()
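
# Importing this module runs the check immediately; on a mismatch it raises
# ImportError naming the offending paths, so a bare `import check_mpi` acts as a
# guard at program start-up.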
main.rs | //
// Copyright (C) 2018 Kubos Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use getopts::Options;
use kubos_system::{Config, DEFAULT_PATH};
use kubos_telemetry_db::{Database, Entry};
use rand::{thread_rng, Rng};
use serde_json::{json, ser};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::time::Duration;
use std::{env, thread};
use time::PreciseTime;
const DEFAULT_ITERATIONS: i64 = 1000;
const TEST_NAME_MAX_COLS: usize = 30;
const TEST_NUM_MAX_COLS: usize = 10;
fn pad(s: &str, cols: usize) -> String {
if s.len() < cols {
let mut s2 = String::from(s);
s2.push_str(&" ".repeat(cols - s.len()));
return s2;
}
s.to_string()
}
fn pad_name(name: &str) -> String {
pad(name, TEST_NAME_MAX_COLS)
}
fn pad_num<T: ToString>(val: T) -> String {
pad(&val.to_string(), TEST_NUM_MAX_COLS)
}
struct DbTest {
iterations: i64,
config: Config,
}
struct PerfResult {
name: String,
avg_us: i64,
total_us: i64,
}
impl PerfResult {
fn new(name: &str, avg_us: i64, total_us: i64) -> PerfResult {
PerfResult {
name: name.to_string(),
avg_us,
total_us,
}
}
fn print(&self) {
println!(
"{} | {} | {}",
pad_name(&self.name),
pad_num(self.avg_us),
pad_num(self.total_us)
);
}
}
impl DbTest {
fn new(iterations: i64, config_path: String) -> DbTest {
DbTest {
iterations,
config: Config::new_from_path("telemetry-service", config_path).unwrap(),
}
}
fn db_insert_test(&self) -> PerfResult {
let db_path = self
.config
.get("database")
.expect("No database path found in config file");
let db_path = db_path.as_str().unwrap();
let db = Database::new(&db_path);
db.setup();
let mut times: Vec<i64> = Vec::new();
for _ in 0..self.iterations {
let timestamp: f64 = thread_rng().gen_range(0.0, 100_000_000_000_000_000.0);
let start = PreciseTime::now();
if db
.insert(timestamp, "db-test", "parameter", "value")
.is_ok()
{
times.push(start.to(PreciseTime::now()).num_microseconds().unwrap());
}
}
let num_entries = times.len() as i64;
let sum: i64 = times.iter().sum();
let average = sum / num_entries;
PerfResult::new("local_db_api_insert", average, sum)
}
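    // Each subsequent *_test method repeats this shape: time `iterations`
    // operations, then reduce the per-call microsecond samples into a PerfResult.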
fn db_insert_bulk_test(&self) -> PerfResult {
let db_path = self
.config
.get("database")
.expect("No database path found in config file");
let db_path = db_path.as_str().unwrap();
let db = Database::new(&db_path);
db.setup();
let mut entries: Vec<Entry> = Vec::new();
for _ in 0..self.iterations {
let timestamp: f64 = thread_rng().gen_range(0.0, 100_000_000_000_000_000.0);
entries.push(Entry {
timestamp,
subsystem: "db-test".to_string(),
parameter: "parameter".to_string(),
value: "value".to_string(),
});
}
let start = PreciseTime::now();
let end = match db.insert_bulk(entries) {
            Ok(_) => start.to(PreciseTime::now()).num_microseconds().unwrap(),
            Err(e) => panic!("insert_bulk function failed: {:?}", e),
        };

        PerfResult::new("local_db_api_insert_bulk", end / self.iterations, end)
    }
fn graphql_insert_test(&self) -> PerfResult {
let mut times: Vec<i64> = Vec::new();
for _ in 0..self.iterations {
let mut rng = thread_rng();
let timestamp = rng.gen_range(0, ::std::i32::MAX);
let mutation = format!(
r#"mutation {{
insert(timestamp: {}, subsystem: "db-test", parameter: "voltage", value: "4.0") {{
success,
errors
}}
}}"#,
timestamp
);
let start = PreciseTime::now();
let client = reqwest::Client::builder().build().unwrap();
let uri = format!("http://{}", self.config.hosturl().unwrap());
let mut map = ::std::collections::HashMap::new();
map.insert("query", mutation);
match client.post(&uri).json(&map).send() {
Ok(_) => times.push(start.to(PreciseTime::now()).num_microseconds().unwrap()),
Err(e) => panic!("recv function failed: {:?}", e),
}
}
let num_entries = times.len() as i64;
let sum: i64 = times.iter().sum();
let average = sum / num_entries;
PerfResult::new("remote_gql_insert", average, sum)
}
fn graphql_insert_bulk_test(&self) -> PerfResult {
let mut bulk_entries = String::from("[");
for i in 0..self.iterations {
let mut rng = thread_rng();
let timestamp = rng.gen_range(0, ::std::i32::MAX);
let next = if i < self.iterations - 1 { "," } else { "]" };
let entry = format!(
r#"{{ timestamp: {}, subsystem: "db-test", parameter: "voltage", value: "5.0" }}{}"#,
timestamp, next
);
bulk_entries.push_str(&entry);
}
let mutation = format!(
r#"
mutation {{
insertBulk(entries: {}) {{
success,
errors
}}
}}"#,
bulk_entries
);
let start = PreciseTime::now();
let client = reqwest::Client::builder().build().unwrap();
let uri = format!("http://{}", self.config.hosturl().unwrap());
let mut map = ::std::collections::HashMap::new();
map.insert("query", mutation);
let end = match client.post(&uri).json(&map).send() {
Ok(_) => start.to(PreciseTime::now()).num_microseconds().unwrap(),
Err(e) => panic!("recv function failed: {:?}", e),
};
PerfResult::new("remote_gql_insert_bulk", end / self.iterations, end)
}
fn direct_udp_test(&self) -> PerfResult {
let mut times: Vec<i64> = Vec::new();
let port = self.config.get("direct_port").unwrap();
let host = self.config.hosturl().unwrap().to_owned();
let ip: Vec<&str> = host.split(':').collect();
let remote_addr = format!("{}:{}", ip[0], port);
let local_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
let socket = UdpSocket::bind(local_addr).expect("Couldn't bind to address");
let message = json!({
"subsystem": "db-test",
"parameter": "voltage",
"value": "3.3"
});
for _ in 0..self.iterations {
let start = PreciseTime::now();
socket
.send_to(&ser::to_vec(&message).unwrap(), &remote_addr)
.unwrap();
times.push(start.to(PreciseTime::now()).num_microseconds().unwrap());
thread::sleep(Duration::from_millis(2));
}
let num_entries = times.len() as i64;
let sum: i64 = times.iter().sum();
let average = sum / num_entries;
PerfResult::new("remote_udp_insert", average, sum)
}
fn test_cleanup(&self) {
let mutation = r#"mutation {
delete(subsystem: "db-test") {
success,
errors,
entriesDeleted
}
}"#;
let client = reqwest::Client::builder().build().unwrap();
let uri = format!("http://{}", self.config.hosturl().unwrap());
let mut map = ::std::collections::HashMap::new();
map.insert("query", mutation);
let result: serde_json::Value =
client.post(&uri).json(&map).send().unwrap().json().unwrap();
match result.get("data").and_then(|msg| msg.get("delete")) {
Some(message) => {
let success = serde_json::from_value::<bool>(message["success"].clone()).unwrap();
let errors = serde_json::from_value::<String>(message["errors"].clone()).unwrap();
let entries_deleted =
serde_json::from_value::<i64>(message["entriesDeleted"].clone()).unwrap();
if success {
println!("Cleaned up {} test entries", entries_deleted);
} else {
eprintln!("Failed to deleted test entries: {}", errors);
}
}
None => eprintln!("Failed to process delete response"),
}
}
}
fn main() {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optopt(
"i",
"iterations",
&format!(
"number of iterations (or entries) to insert. default is {}",
DEFAULT_ITERATIONS
),
"N",
);
opts.optopt("c", "config", "Path to config file", "CONFIG");
let mut iterations = DEFAULT_ITERATIONS;
let mut config = DEFAULT_PATH.to_string();
if let Ok(matches) = opts.parse(&args[1..]) {
iterations = matches
.opt_str("i")
.map(|iter| iter.parse::<i64>().unwrap_or(DEFAULT_ITERATIONS))
.unwrap_or(DEFAULT_ITERATIONS);
config = matches
.opt_str("c")
.unwrap_or_else(|| DEFAULT_PATH.to_string());
};
let db_test = DbTest::new(iterations, config);
println!(
"{} | {} | {}",
pad_name("NAME"),
pad_num("Avg (us)"),
pad_num("Total (us)")
);
println!(
"{}",
"-".repeat(TEST_NAME_MAX_COLS + (TEST_NUM_MAX_COLS * 2) + 6)
);
db_test.db_insert_test().print();
db_test.db_insert_bulk_test().print();
// This sleep likely isn't necessary, but I'd like to make extra sure nothing about a test
// lingers to affect the next one
thread::sleep(Duration::new(1, 0));
db_test.graphql_insert_test().print();
thread::sleep(Duration::new(1, 0));
db_test.graphql_insert_bulk_test().print();
thread::sleep(Duration::new(1, 0));
db_test.direct_udp_test().print();
thread::sleep(Duration::new(1, 0));
db_test.test_cleanup();
}
mod.rs | //! Ferrum's HTTP Response representation and associated methods.
use std::fmt::{self, Debug};
use std::mem::replace;
use mime::Mime;
use typemap::{TypeMap, TypeMapInner};
use plugin::Extensible;
use hyper::{Body, HttpVersion};
use hyper::header::{ContentLength, ContentType, Location, Raw};
use {Plugin, Header, Headers, StatusCode};
pub use hyper::Response as HyperResponse;
pub mod content;
pub use self::content::*;
/// The response representation given to `Middleware`
pub struct Response {
/// The response status-code.
pub status: StatusCode,
/// The headers of the response.
pub headers: Headers,
/// The body of the response.
pub body: Option<Body>,
/// A TypeMap to be used as an extensible storage for data
/// associated with this Response.
pub extensions: TypeMap<TypeMapInner>,
}
impl Response {
/// Construct a blank Response
#[inline]
pub fn new() -> Response {
Response {
status: Default::default(),
headers: Headers::new(),
body: None, // Start with no body.
extensions: TypeMap::custom()
}
}
/// Construct a redirect Response
#[inline]
pub fn new_redirect<R: Into<Raw>>(location: R) -> Response {
let mut headers = Headers::new();
headers.set(Location::parse_header(&location.into()).unwrap());
Response {
status: StatusCode::Found,
headers,
body: None, // Start with no body.
extensions: TypeMap::custom()
}
}
/// Set the status and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_status(mut self, status: StatusCode) -> Self {
self.status = status;
self
}
/// Set a header and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_header<H: Header>(mut self, header: H) -> Self {
self.headers.set(header);
self
}
/// Set the headers and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_headers(mut self, headers: Headers) -> Self {
self.headers = headers;
self
}
/// Set the body and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_body<T: Into<Body>>(mut self, body: T) -> Self {
self.body = Some(body.into());
self
}
/// Set the content and move the Response.
///
/// Useful for the "builder-style" pattern.
#[inline]
pub fn with_content<C: Into<Content>>(mut self, content: C, mime: Mime) -> Self {
self.set_content(content, mime);
self
}
/// Set the content.
#[inline]
pub fn set_content<C: Into<Content>>(&mut self, content: C, mime: Mime) {
let content = content.into();
self.headers.set(ContentType(mime));
self.headers.set(ContentLength(content.len() as u64));
self.body = Some(content.into());
self.status = StatusCode::Ok;
}
/// Set the content-type mime and move the Response.
///
/// Useful for the "builder-style" pattern.
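    ///
    /// A sketch:
    ///
    /// ```ignore
    /// let response = Response::new().with_mime(mime::TEXT_PLAIN);
    /// ```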
#[inline]
    pub fn with_mime(mut self, mime: Mime) -> Self {
self.set_mime(mime);
self
}
/// Set the content-type mime.
#[inline]
pub fn set_mime(&mut self, mime: Mime) {
self.headers.set(ContentType(mime));
}
}
impl From<HyperResponse> for Response {
fn from(mut from_response: HyperResponse) -> Response {
Response {
status: from_response.status(),
headers: replace(from_response.headers_mut(), Headers::new()),
body: if from_response.body_ref().is_some() { Some(from_response.body()) } else { None },
extensions: TypeMap::custom()
}
}
}
impl From<Response> for HyperResponse {
fn from(from_response: Response) -> HyperResponse {
HyperResponse::new()
.with_status(from_response.status)
.with_headers(from_response.headers)
.with_body(from_response.body.unwrap_or_default())
}
}
impl Debug for Response {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
writeln!(formatter, "{} {}\n{}",
HttpVersion::default(),
self.status,
self.headers
)
}
}
impl fmt::Display for Response {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
Debug::fmt(self, formatter)
}
}
// Allow plugins to attach to responses.
impl Extensible<TypeMapInner> for Response {
fn extensions(&self) -> &TypeMap<TypeMapInner> {
&self.extensions
}
fn extensions_mut(&mut self) -> &mut TypeMap<TypeMapInner> {
&mut self.extensions
}
}
impl Plugin for Response {}
#[cfg(test)]
mod test {
use super::*;
use hyper::header::{ContentType};
use futures::stream::Stream;
use futures::{future, Future};
use mime;
use std::str::from_utf8;
#[test]
fn test_create_response() {
let response = Response::new();
assert_eq!(response.status, StatusCode::Ok);
assert_eq!(response.headers, Headers::new());
assert!(response.body.is_none());
}
#[test]
fn test_response_from_hyper_response() {
let mut headers = Headers::new();
headers.set(ContentType(mime::TEXT_HTML));
let response = Response::from(
HyperResponse::new()
.with_status(StatusCode::NotFound)
.with_headers(headers.clone())
.with_body("Error")
);
assert_eq!(response.status, StatusCode::NotFound);
assert_eq!(response.headers, headers);
assert!(response.body.is_some());
let body = response.body.unwrap()
.concat2()
.and_then(|chunk| {
future::ok(String::from(from_utf8(&chunk).unwrap()))
})
.wait().unwrap();
assert_eq!(body, "Error");
}
#[test]
fn test_hyper_response_from_response() {
let mut headers = Headers::new();
headers.set(ContentType(mime::TEXT_HTML));
let response = HyperResponse::from(
Response {
status: StatusCode::NotFound,
headers: headers.clone(),
body: Some("Error".into()),
extensions: TypeMap::custom()
}
);
assert_eq!(response.status(), StatusCode::NotFound);
assert_eq!(response.headers(), &headers);
assert!(response.body_ref().is_some());
let body = response.body()
.concat2()
.and_then(|chunk| {
future::ok(String::from(from_utf8(&chunk).unwrap()))
})
.wait().unwrap();
assert_eq!(body, "Error");
}
}
parser.py | from argparse import _HelpAction, _SubParsersAction
import re
class NavigationException(Exception):
pass
def parser_navigate(parser_result, path, current_path=None):
if isinstance(path, str):
if path == '':
return parser_result
path = re.split(r'\s+', path)
current_path = current_path or []
if len(path) == 0:
return parser_result
if 'children' not in parser_result:
raise NavigationException(
'Current parser have no children elements. (path: %s)' %
' '.join(current_path))
next_hop = path.pop(0)
for child in parser_result['children']:
if child['name'] == next_hop:
current_path.append(next_hop)
return parser_navigate(child, path, current_path)
raise NavigationException(
'Current parser have no children element with name: %s (path: %s)' % (
next_hop, ' '.join(current_path)))
def _try_add_parser_attribute(data, parser, attribname):
attribval = getattr(parser, attribname, None)
if attribval is None:
return
if not isinstance(attribval, str):
return
if len(attribval) > 0:
data[attribname] = attribval
def _format_usage_without_prefix(parser):
"""
Use private argparse APIs to get the usage string without
the 'usage: ' prefix.
"""
fmt = parser._get_formatter()
fmt.add_usage(parser.usage, parser._actions,
parser._mutually_exclusive_groups, prefix='')
return fmt.format_help().strip()
def parse_parser(parser, data=None, **kwargs):
if data is None:
data = {
'name': '',
'usage': parser.format_usage().strip(),
'bare_usage': _format_usage_without_prefix(parser),
'prog': parser.prog,
}
_try_add_parser_attribute(data, parser, 'description')
_try_add_parser_attribute(data, parser, 'epilog')
for action in parser._get_positional_actions():
if isinstance(action, _HelpAction):
continue
if isinstance(action, _SubParsersAction):
helps = {}
for item in action._choices_actions:
helps[item.dest] = item.help
for name, subaction in action._name_parser_map.items():
subaction.prog = '%s %s' % (parser.prog, name)
subdata = {
'name': name,
'help': helps[name] if name in helps else '',
'usage': subaction.format_usage().strip(),
'bare_usage': _format_usage_without_prefix(subaction),
}
parse_parser(subaction, subdata, **kwargs)
if 'children' not in data:
data['children'] = []
data['children'].append(subdata)
continue
if 'args' not in data:
data['args'] = []
arg = {
'name': action.dest,
'help': action.help or '',
'metavar': action.metavar
}
if action.choices:
arg['choices'] = action.choices
data['args'].append(arg)
show_defaults = (
('skip_default_values' not in kwargs) or
(kwargs['skip_default_values'] is False))
for action in parser._get_optional_actions():
if isinstance(action, _HelpAction):
continue
if 'options' not in data:
data['options'] = []
option = {
'name': action.option_strings,
'default': action.default if show_defaults else '==SUPPRESS==',
'help': action.help or ''
}
if action.choices:
option['choices'] = action.choices
if "==SUPPRESS==" not in option['help']:
|
return data
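# Usage sketch (illustrative addition, not part of the original module): build a
# tiny parser with one subcommand, dump it with parse_parser, then walk to the
# subcommand node with parser_navigate.
if __name__ == '__main__':
    import argparse
    example = argparse.ArgumentParser(prog='tool')
    subparsers = example.add_subparsers()
    subparsers.add_parser('deploy', help='deploy the app')
    tree = parse_parser(example)
    print(parser_navigate(tree, 'deploy')['name'])  # -> deploy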
traits.rs
use num_traits::{One, Zero};
use std::fmt;
use std::ops::{Add, Div, Mul, Sub};
pub trait WeightValOps<RHS = Self, Output = Self>
where
Self: Sized,
Self: Add<RHS, Output = Output>,
Self: Sub<RHS, Output = Output>,
    Self: Mul<RHS, Output = Output>,
    Self: Div<RHS, Output = Output>,
{
}
impl<RHS, Output, T> WeightValOps<RHS, Output> for T
where
T: Sized,
T: Add<RHS, Output = Output>,
T: Sub<RHS, Output = Output>,
T: Mul<RHS, Output = Output>,
T: Div<RHS, Output = Output>,
{
}
pub trait WeightOps<Base>
where
Self: WeightValOps<Base, Base> + for<'a> WeightValOps<&'a Base, Base>,
{
}
impl<Base, T> WeightOps<Base> for T
where
T: WeightValOps<Base, Base> + for<'a> WeightValOps<&'a Base, Base>,
{
}
pub trait Weight: Clone + Ord + WeightOps<Self> + Zero + One + fmt::Debug {
    fn from_i64(v: i64) -> Self;
#[inline]
fn fuzzy_eq(&self, other: &Self) -> bool {
self == other
}
}
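// Illustrative addition (not part of the original file): `i64` already satisfies
// the arithmetic, `Zero`, `One`, `Ord`, and `Debug` bounds, so implementing
// `Weight` only requires `from_i64`; `fuzzy_eq` keeps its default exact check.
impl Weight for i64 {
    fn from_i64(v: i64) -> Self {
        v
    }
}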
base64.rs
use error_chain::error_chain;
use std::str;
use base64::{encode, decode};
error_chain! {
foreign_links {
Base64(base64::DecodeError);
Utf8Error(str::Utf8Error);
}
}
fn main() -> Result<()> {
let hello = b"hello rustaceans";
let encoded = encode(hello);
let decoded = decode(&encoded)?;
println!("origin: {}", str::from_utf8(hello)?);
println!("base64 encoded: {}", encoded);
println!("back to origin: {}", str::from_utf8(&decoded)?);
Ok(())
}
event.rs
use anyhow::Result;
use crossterm::event::{self, Event as CrosstermEvent, KeyEvent, MouseEvent};
use std::sync::mpsc;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::thread;
use std::time::{Duration, Instant};
/// Representation of terminal events
/// ([`Crossterm events`] + [`Tick`]).
///
/// [`Crossterm events`]: crossterm::event::Event
/// [`Tick`]: Event::Tick
#[derive(Clone, Copy, Debug)]
pub enum Event {
/// Key press.
Key(KeyEvent),
/// Mouse click/scroll.
Mouse(MouseEvent),
/// Terminal resize.
Resize(u16, u16),
/// Terminal tick.
Tick,
}
/// Basic event handler for terminal [`events`].
///
/// Event types are handled in a common handler thread
/// and returned to a receiver.
///
/// [`events`]: Event
#[derive(Debug)]
pub struct EventHandler {
/// Event sender.
sender: mpsc::Sender<Event>,
/// Event receiver.
receiver: mpsc::Receiver<Event>,
/// Event handler thread.
handler: thread::JoinHandle<()>,
/// Is the key input disabled?
pub key_input_disabled: Arc<AtomicBool>,
}
impl EventHandler {
/// Constructs a new instance of `EventHandler`.
pub fn new(tick_rate: u64) -> Self {
let tick_rate = Duration::from_millis(tick_rate);
let (sender, receiver) = mpsc::channel();
let key_input_disabled = Arc::new(AtomicBool::new(false));
let handler = {
let sender = sender.clone();
let key_input_disabled = key_input_disabled.clone();
thread::spawn(move || {
let mut last_tick = Instant::now();
loop {
let timeout = tick_rate
.checked_sub(last_tick.elapsed())
.unwrap_or(tick_rate);
if key_input_disabled.load(Ordering::Relaxed) {
thread::sleep(timeout);
continue;
} else if event::poll(timeout).expect("no events available")
{
match event::read().expect("unable to read event") {
CrosstermEvent::Key(e) => {
sender.send(Event::Key(e))
}
CrosstermEvent::Mouse(e) => {
sender.send(Event::Mouse(e))
}
CrosstermEvent::Resize(w, h) => {
sender.send(Event::Resize(w, h))
}
}
.expect("failed to send terminal event")
}
                    if last_tick.elapsed() >= tick_rate {
                        sender
                            .send(Event::Tick)
                            .expect("failed to send tick event");
                        last_tick = Instant::now();
                    }
}
})
};
Self {
sender,
receiver,
handler,
key_input_disabled,
}
}
/// Receive the next event from handler.
///
/// > This function will always block the current thread if
/// there is no data available and it's possible for more data to be sent.
///
/// (Note that [`Tick`] event is frequently received depending on the tick rate.)
///
/// [`Tick`]: Event::Tick
pub fn next(&self) -> Result<Event, mpsc::RecvError> {
self.receiver.recv()
}
}
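// Usage sketch (illustrative addition, not part of the original file): drain
// events in a plain loop and dispatch on their type; the 250 ms tick rate is an
// arbitrary example value.
#[allow(dead_code)]
fn event_loop_sketch() -> Result<()> {
    let events = EventHandler::new(250);
    loop {
        match events.next()? {
            Event::Key(_key) => { /* handle key presses, break on a quit key */ }
            Event::Mouse(_) | Event::Resize(_, _) => {}
            Event::Tick => { /* redraw the UI */ }
        }
    }
}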
#[cfg(feature = "tui-tests")]
#[cfg(test)]
mod tests {
use super::*;
use crossterm::event::{KeyCode, KeyModifiers};
use pretty_assertions::assert_eq;
#[test]
fn test_term_event() -> Result<()> {
let events = EventHandler::new(100);
for step in 0..2 {
if step == 1 {
let sender = events.sender.clone();
thread::spawn(move || {
sender.send(Event::Key(KeyEvent::new(
KeyCode::Esc,
KeyModifiers::NONE,
)))
});
}
match events.next()? {
Event::Key(key_event) => {
if key_event.code == KeyCode::Esc {
assert_eq!(1, step);
break;
}
}
Event::Tick => assert_eq!(0, step),
_ => {}
};
}
Ok(())
}
}
generators.rs
//! `generators` module includes all the classes to deal with V3 format generators
use std::collections::HashMap;
use log::*;
use maplit::hashmap;
use pact_models::bodies::OptionalBody;
use pact_models::content_types::ContentType;
use pact_models::generators::{ContentTypeHandler, GenerateValue, Generator, GeneratorTestMode, JsonHandler, VariantMatcher};
use pact_models::matchingrules::MatchingRuleCategory;
use pact_models::path_exp::DocPath;
use pact_models::xml_utils::parse_bytes;
use pact_plugin_driver::catalogue_manager::find_content_generator;
use serde_json::{self, Value};
use sxd_document::dom::Document;
use crate::{CoreMatchingContext, DiffConfig, MatchingContext};
use crate::json::compare_json;
/// Implementation of a content type handler for XML (currently unimplemented).
pub struct XmlHandler<'a> {
/// XML document to apply the generators to.
pub value: Document<'a>
}
impl <'a> ContentTypeHandler<Document<'a>> for XmlHandler<'a> {
fn process_body(
&mut self,
_generators: &HashMap<DocPath, Generator>,
_mode: &GeneratorTestMode,
_context: &HashMap<&str, Value>,
_matcher: &Box<dyn VariantMatcher + Send + Sync>
) -> Result<OptionalBody, String> {
error!("UNIMPLEMENTED: Generators are not currently supported with XML");
Err("Generators are not supported with XML".to_string())
}
fn apply_key(
&mut self,
_key: &DocPath,
_generator: &dyn GenerateValue<Document<'a>>,
_context: &HashMap<&str, Value>,
_matcher: &Box<dyn VariantMatcher + Send + Sync>
) {
error!("UNIMPLEMENTED: Generators are not currently supported with XML");
}
}
/// Apply the generators to the body, returning a new body
pub async fn generators_process_body(
mode: &GeneratorTestMode,
body: &OptionalBody,
content_type: Option<ContentType>,
context: &HashMap<&str, Value>,
generators: &HashMap<DocPath, Generator>,
matcher: &(dyn VariantMatcher + Send + Sync)
) -> anyhow::Result<OptionalBody> {
match content_type {
Some(content_type) => if content_type.is_json() {
debug!("apply_body_generators: JSON content type");
let result: Result<Value, serde_json::Error> = serde_json::from_slice(&body.value().unwrap_or_default());
match result {
Ok(val) => {
let mut handler = JsonHandler { value: val };
Ok(handler.process_body(generators, mode, context, &matcher.boxed()).unwrap_or_else(|err| {
error!("Failed to generate the body: {}", err);
body.clone()
}))
},
Err(err) => {
error!("Failed to parse the body, so not applying any generators: {}", err);
Ok(body.clone())
}
}
} else if content_type.is_xml() {
debug!("apply_body_generators: XML content type");
match parse_bytes(&body.value().unwrap_or_default()) {
Ok(val) => {
let mut handler = XmlHandler { value: val.as_document() };
Ok(handler.process_body(generators, mode, context, &matcher.boxed()).unwrap_or_else(|err| {
error!("Failed to generate the body: {}", err);
body.clone()
}))
},
Err(err) => {
error!("Failed to parse the body, so not applying any generators: {}", err);
Ok(body.clone())
}
}
} else if let Some(content_generator) = find_content_generator(&content_type) {
debug!("apply_body_generators: Found a content generator from a plugin");
content_generator.generate_content(&content_type, &generators.iter()
.map(|(k, v)| (k.to_string(), v.clone())).collect(), body).await
} else {
warn!("Unsupported content type {} - Generators only support JSON and XML", content_type);
Ok(body.clone())
},
_ => Ok(body.clone())
}
}
pub(crate) fn find_matching_variant<T>(
value: &T,
variants: &[(usize, MatchingRuleCategory, HashMap<DocPath, Generator>)],
callback: &dyn Fn(&DocPath, &T, &dyn MatchingContext) -> bool
) -> Option<(usize, HashMap<DocPath, Generator>)>
where T: Clone + std::fmt::Debug {
let result = variants.iter()
.find(|(index, rules, _)| {
debug!("find_matching_variant: Comparing variant {} with value '{:?}'", index, value);
let context = CoreMatchingContext::new(DiffConfig::NoUnexpectedKeys,
rules, &hashmap!{});
let matches = callback(&DocPath::root(), value, &context);
debug!("find_matching_variant: Comparing variant {} => {}", index, matches);
matches
});
debug!("find_matching_variant: result = {:?}", result);
result.map(|(index, _, generators)| (*index, generators.clone()))
}
#[derive(Debug, Clone)]
pub(crate) struct DefaultVariantMatcher;
impl VariantMatcher for DefaultVariantMatcher {
  fn find_matching_variant(
&self,
value: &Value,
variants: &Vec<(usize, MatchingRuleCategory, HashMap<DocPath, Generator>)>
) -> Option<(usize, HashMap<DocPath, Generator>)> {
let callback = |path: &DocPath, value: &Value, context: &dyn MatchingContext| {
compare_json(path, value, value, context).is_ok()
};
find_matching_variant(value, variants, &callback)
}
fn boxed(&self) -> Box<dyn VariantMatcher + Send + Sync> {
Box::new(self.clone())
}
}
#[cfg(test)]
mod tests {
use expectest::prelude::*;
use maplit::hashmap;
use pact_models::generators::{GenerateValue, Generator, VariantMatcher};
use pact_models::matchingrules::MatchingRule;
use pact_models::matchingrules_list;
use pact_models::path_exp::DocPath;
use pretty_assertions::assert_eq;
use serde_json::json;
use crate::generators::DefaultVariantMatcher;
#[test_log::test]
fn array_contains_generator_test() {
let generator = Generator::ArrayContains(vec![
(0, matchingrules_list! {
"body"; "$.href" => [ MatchingRule::Regex(".*(\\/orders\\/\\d+)$".into()) ]
}, hashmap! {
DocPath::new_unwrap("$.href") =>
Generator::MockServerURL(
"http://localhost:8080/orders/1234".into(),
".*(\\/orders\\/\\d+)$".into(),
)
}),
(1, matchingrules_list! {
"body"; "$.href" => [ MatchingRule::Regex(".*(\\/orders\\/\\d+)$".into()) ]
}, hashmap! {
DocPath::new_unwrap("$.href") =>
Generator::MockServerURL(
"http://localhost:8080/orders/1234".into(),
".*(\\/orders\\/\\d+)$".into(),
)
})
]);
let value = json!([
{
"href": "http://localhost:9000/orders/1234",
"method": "PUT",
"name": "update"
},
{
"href": "http://localhost:9000/orders/1234",
"method": "DELETE",
"name": "delete"
}
]);
let context = hashmap! {
"mockServer" => json!({
"href": "https://somewhere.else:1234/subpath"
})
};
let generated = generator.generate_value(&value, &context, &DefaultVariantMatcher.boxed());
expect!(generated.as_ref()).to(be_ok());
let generated_value = generated.unwrap();
assert_eq!(generated_value, json!([
{
"href": "https://somewhere.else:1234/subpath/orders/1234",
"method": "PUT",
"name": "update"
},
{
"href": "https://somewhere.else:1234/subpath/orders/1234",
"method": "DELETE",
"name": "delete"
}
]));
}
}
Flat.tsx
import React from 'react';
import { Box, Progress, Center, Heading } from 'native-base';
export const Example = () => {
return (
<Box w="90%">
        <Center mb={8}>
          <Heading>Flat Progress</Heading>
        </Center>
        <Progress rounded="0" size="lg" value={65} mx={4} />
</Box>
);
};
bot.py
# chatbot/bot.py
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
diego = ChatBot("Diego")
trainer = ChatterBotCorpusTrainer(diego)
trainer.train(
    "chatterbot.corpus.english.greetings",
    "chatterbot.corpus.english.conversations",
)
KDCReq.go
package messages
// Reference: https://www.ietf.org/rfc/rfc4120.txt
// Section: 5.4.1
import (
"crypto/rand"
"fmt"
"math"
"math/big"
"time"
"github.com/jcmturner/gofork/encoding/asn1"
"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
"gopkg.in/jcmturner/gokrb5.v7/config"
"gopkg.in/jcmturner/gokrb5.v7/crypto"
"gopkg.in/jcmturner/gokrb5.v7/iana"
"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
"gopkg.in/jcmturner/gokrb5.v7/iana/flags"
"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
"gopkg.in/jcmturner/gokrb5.v7/iana/nametype"
"gopkg.in/jcmturner/gokrb5.v7/iana/patype"
"gopkg.in/jcmturner/gokrb5.v7/krberror"
"gopkg.in/jcmturner/gokrb5.v7/types"
)
type marshalKDCReq struct {
PVNO int `asn1:"explicit,tag:1"`
MsgType int `asn1:"explicit,tag:2"`
PAData types.PADataSequence `asn1:"explicit,optional,tag:3"`
ReqBody asn1.RawValue `asn1:"explicit,tag:4"`
}
// KDCReqFields represents the KRB_KDC_REQ fields.
type KDCReqFields struct {
PVNO int
MsgType int
PAData types.PADataSequence
ReqBody KDCReqBody
Renewal bool
}
// ASReq implements RFC 4120 KRB_AS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1.
type ASReq struct {
KDCReqFields
}
// TGSReq implements RFC 4120 KRB_TGS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1.
type TGSReq struct {
KDCReqFields
}
type marshalKDCReqBody struct {
KDCOptions asn1.BitString `asn1:"explicit,tag:0"`
CName types.PrincipalName `asn1:"explicit,optional,tag:1"`
Realm string `asn1:"generalstring,explicit,tag:2"`
SName types.PrincipalName `asn1:"explicit,optional,tag:3"`
From time.Time `asn1:"generalized,explicit,optional,tag:4"`
Till time.Time `asn1:"generalized,explicit,tag:5"`
RTime time.Time `asn1:"generalized,explicit,optional,tag:6"`
Nonce int `asn1:"explicit,tag:7"`
EType []int32 `asn1:"explicit,tag:8"`
Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"`
EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"`
// Ticket needs to be a raw value as it is wrapped in an APPLICATION tag
AdditionalTickets asn1.RawValue `asn1:"explicit,optional,tag:11"`
}
// KDCReqBody implements the KRB_KDC_REQ request body.
type KDCReqBody struct {
KDCOptions asn1.BitString `asn1:"explicit,tag:0"`
CName types.PrincipalName `asn1:"explicit,optional,tag:1"`
Realm string `asn1:"generalstring,explicit,tag:2"`
SName types.PrincipalName `asn1:"explicit,optional,tag:3"`
From time.Time `asn1:"generalized,explicit,optional,tag:4"`
Till time.Time `asn1:"generalized,explicit,tag:5"`
RTime time.Time `asn1:"generalized,explicit,optional,tag:6"`
Nonce int `asn1:"explicit,tag:7"`
EType []int32 `asn1:"explicit,tag:8"`
Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"`
EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"`
AdditionalTickets []Ticket `asn1:"explicit,optional,tag:11"`
}
// NewASReqForTGT generates a new KRB_AS_REQ struct for a TGT request.
func NewASReqForTGT(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) {
sname := types.PrincipalName{
NameType: nametype.KRB_NT_SRV_INST,
NameString: []string{"krbtgt", realm},
}
return NewASReq(realm, c, cname, sname)
}
// NewASReqForChgPasswd generates a new KRB_AS_REQ struct for a change password request.
func NewASReqForChgPasswd(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) {
sname := types.PrincipalName{
NameType: nametype.KRB_NT_PRINCIPAL,
NameString: []string{"kadmin", "changepw"},
}
return NewASReq(realm, c, cname, sname)
}
// NewASReq generates a new KRB_AS_REQ struct for a given SNAME.
func NewASReq(realm string, c *config.Config, cname, sname types.PrincipalName) (ASReq, error) {
nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
if err != nil {
return ASReq{}, err
}
t := time.Now().UTC()
// Copy the default options to make this thread safe
kopts := types.NewKrbFlags()
copy(kopts.Bytes, c.LibDefaults.KDCDefaultOptions.Bytes)
kopts.BitLength = c.LibDefaults.KDCDefaultOptions.BitLength
a := ASReq{
KDCReqFields{
PVNO: iana.PVNO,
MsgType: msgtype.KRB_AS_REQ,
PAData: types.PADataSequence{},
ReqBody: KDCReqBody{
KDCOptions: kopts,
Realm: realm,
CName: cname,
SName: sname,
Till: t.Add(c.LibDefaults.TicketLifetime),
Nonce: int(nonce.Int64()),
EType: c.LibDefaults.DefaultTktEnctypeIDs,
},
},
}
if c.LibDefaults.Forwardable {
types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable)
}
if c.LibDefaults.Canonicalize {
types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize)
}
if c.LibDefaults.Proxiable {
types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable)
}
if c.LibDefaults.RenewLifetime != 0 {
types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable)
a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime)
a.ReqBody.RTime = t.Add(time.Duration(48) * time.Hour)
}
if !c.LibDefaults.NoAddresses {
ha, err := types.LocalHostAddresses()
if err != nil {
return a, fmt.Errorf("could not get local addresses: %v", err)
}
ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...)
a.ReqBody.Addresses = ha
}
return a, nil
}
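// exampleASReq is an illustrative addition (not part of the original file): it
// builds an AS-REQ for a TGT from a loaded krb5.conf. The config path, realm,
// and principal name are placeholder values.
func exampleASReq() (ASReq, error) {
	cfg, err := config.Load("/etc/krb5.conf")
	if err != nil {
		return ASReq{}, err
	}
	cname := types.PrincipalName{
		NameType:   nametype.KRB_NT_PRINCIPAL,
		NameString: []string{"alice"},
	}
	return NewASReqForTGT("EXAMPLE.COM", cfg, cname)
}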
// NewTGSReq generates a new KRB_TGS_REQ struct.
func NewTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, tgt Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool) (TGSReq, error) {
a, err := tgsReq(cname, sname, kdcRealm, renewal, c)
if err != nil {
return a, err
}
err = a.setPAData(tgt, sessionKey)
return a, err
}
// NewUser2UserTGSReq returns a TGS-REQ suitable for user-to-user authentication (https://tools.ietf.org/html/rfc4120#section-3.7)
func NewUser2UserTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, clientTGT Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool, verifyingTGT Ticket) (TGSReq, error) {
a, err := tgsReq(cname, sname, kdcRealm, renewal, c)
if err != nil {
return a, err
}
a.ReqBody.AdditionalTickets = []Ticket{verifyingTGT}
types.SetFlag(&a.ReqBody.KDCOptions, flags.EncTktInSkey)
err = a.setPAData(clientTGT, sessionKey)
return a, err
}
// tgsReq populates the fields for a TGS_REQ
func tgsReq(cname, sname types.PrincipalName, kdcRealm string, renewal bool, c *config.Config) (TGSReq, error) {
nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
if err != nil {
return TGSReq{}, err
}
t := time.Now().UTC()
k := KDCReqFields{
PVNO: iana.PVNO,
MsgType: msgtype.KRB_TGS_REQ,
ReqBody: KDCReqBody{
KDCOptions: types.NewKrbFlags(),
Realm: kdcRealm,
CName: cname, // Add the CName to make validation of the reply easier
SName: sname,
Till: t.Add(c.LibDefaults.TicketLifetime),
Nonce: int(nonce.Int64()),
EType: c.LibDefaults.DefaultTGSEnctypeIDs,
},
Renewal: renewal,
}
if c.LibDefaults.Forwardable {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Forwardable)
}
if c.LibDefaults.Canonicalize {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Canonicalize)
}
if c.LibDefaults.Proxiable {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Proxiable)
}
if c.LibDefaults.RenewLifetime > time.Duration(0) {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable)
k.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime)
}
if !c.LibDefaults.NoAddresses {
ha, err := types.LocalHostAddresses()
if err != nil {
return TGSReq{}, fmt.Errorf("could not get local addresses: %v", err)
}
ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...)
k.ReqBody.Addresses = ha
}
if renewal {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Renew)
types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable)
}
return TGSReq{
k,
}, nil
}
func (k *TGSReq) setPAData(tgt Ticket, sessionKey types.EncryptionKey) error {
// Marshal the request and calculate checksum
b, err := k.ReqBody.Marshal()
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ body")
}
etype, err := crypto.GetEtype(sessionKey.KeyType)
if err != nil {
return krberror.Errorf(err, krberror.EncryptingError, "error getting etype to encrypt authenticator")
}
cb, err := etype.GetChecksumHash(sessionKey.KeyValue, b, keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM)
if err != nil {
return krberror.Errorf(err, krberror.ChksumError, "error getting etype checksum hash")
}
// Form PAData for TGS_REQ
// Create authenticator
auth, err := types.NewAuthenticator(tgt.Realm, k.ReqBody.CName)
if err != nil {
return krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator")
}
auth.Cksum = types.Checksum{
CksumType: etype.GetHashID(),
Checksum: cb,
}
// Create AP_REQ
apReq, err := NewAPReq(tgt, sessionKey, auth)
if err != nil {
return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AP_REQ")
}
apb, err := apReq.Marshal()
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error marshaling AP_REQ for pre-authentication data")
}
k.PAData = types.PADataSequence{
types.PAData{
PADataType: patype.PA_TGS_REQ,
PADataValue: apb,
},
}
return nil
}
// Unmarshal bytes b into the ASReq struct.
func (k *ASReq) Unmarshal(b []byte) error {
var m marshalKDCReq
_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREQ))
	if err != nil {
		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling AS_REQ")
	}
expectedMsgType := msgtype.KRB_AS_REQ
if m.MsgType != expectedMsgType {
		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
}
var reqb KDCReqBody
err = reqb.Unmarshal(m.ReqBody.Bytes)
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error processing AS_REQ body")
}
k.MsgType = m.MsgType
k.PAData = m.PAData
k.PVNO = m.PVNO
k.ReqBody = reqb
return nil
}
// Unmarshal bytes b into the TGSReq struct.
func (k *TGSReq) Unmarshal(b []byte) error {
var m marshalKDCReq
_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREQ))
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling TGS_REQ")
}
expectedMsgType := msgtype.KRB_TGS_REQ
if m.MsgType != expectedMsgType {
return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
}
var reqb KDCReqBody
err = reqb.Unmarshal(m.ReqBody.Bytes)
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error processing TGS_REQ body")
}
k.MsgType = m.MsgType
k.PAData = m.PAData
k.PVNO = m.PVNO
k.ReqBody = reqb
return nil
}
// Unmarshal bytes b into the KRB_KDC_REQ body struct.
func (k *KDCReqBody) Unmarshal(b []byte) error {
var m marshalKDCReqBody
_, err := asn1.Unmarshal(b, &m)
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling KDC_REQ body")
}
k.KDCOptions = m.KDCOptions
if len(k.KDCOptions.Bytes) < 4 {
tb := make([]byte, 4-len(k.KDCOptions.Bytes))
k.KDCOptions.Bytes = append(tb, k.KDCOptions.Bytes...)
k.KDCOptions.BitLength = len(k.KDCOptions.Bytes) * 8
}
k.CName = m.CName
k.Realm = m.Realm
k.SName = m.SName
k.From = m.From
k.Till = m.Till
k.RTime = m.RTime
k.Nonce = m.Nonce
k.EType = m.EType
k.Addresses = m.Addresses
k.EncAuthData = m.EncAuthData
if len(m.AdditionalTickets.Bytes) > 0 {
k.AdditionalTickets, err = unmarshalTicketsSequence(m.AdditionalTickets)
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling additional tickets")
}
}
return nil
}
// Marshal ASReq struct.
func (k *ASReq) Marshal() ([]byte, error) {
m := marshalKDCReq{
PVNO: k.PVNO,
MsgType: k.MsgType,
PAData: k.PAData,
}
b, err := k.ReqBody.Marshal()
if err != nil {
var mk []byte
return mk, err
}
m.ReqBody = asn1.RawValue{
Class: asn1.ClassContextSpecific,
IsCompound: true,
Tag: 4,
Bytes: b,
}
mk, err := asn1.Marshal(m)
if err != nil {
return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ")
}
mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREQ)
return mk, nil
}
// Marshal TGSReq struct.
func (k *TGSReq) Marshal() ([]byte, error) {
m := marshalKDCReq{
PVNO: k.PVNO,
MsgType: k.MsgType,
PAData: k.PAData,
}
b, err := k.ReqBody.Marshal()
if err != nil {
var mk []byte
return mk, err
}
m.ReqBody = asn1.RawValue{
Class: asn1.ClassContextSpecific,
IsCompound: true,
Tag: 4,
Bytes: b,
}
mk, err := asn1.Marshal(m)
if err != nil {
		return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ")
}
mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREQ)
return mk, nil
}
// Marshal KRB_KDC_REQ body struct.
func (k *KDCReqBody) Marshal() ([]byte, error) {
var b []byte
m := marshalKDCReqBody{
KDCOptions: k.KDCOptions,
CName: k.CName,
Realm: k.Realm,
SName: k.SName,
From: k.From,
Till: k.Till,
RTime: k.RTime,
Nonce: k.Nonce,
EType: k.EType,
Addresses: k.Addresses,
EncAuthData: k.EncAuthData,
}
rawtkts, err := MarshalTicketSequence(k.AdditionalTickets)
if err != nil {
return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body additional tickets")
}
	// The asn1.RawValue needs the tag set on it for where it sits in the KDCReqBody
rawtkts.Tag = 11
if len(rawtkts.Bytes) > 0 {
m.AdditionalTickets = rawtkts
}
b, err = asn1.Marshal(m)
if err != nil {
return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body")
}
return b, nil
}
mfr2.py
import os
import shutil
from glob import glob
import pandas as pd
import random
from collections import defaultdict
from PIL import Image
from torch.utils.data import Dataset, DataLoader
def get_all_images(dir):
    types = ["jpeg", "jpg", "png"]
    files = []
    for t in types:
        path = os.path.join(dir, "**", "*." + t)
        files.extend(glob(path, recursive=True))
    return files
def casia(dir):
files = get_all_images(dir)
users = defaultdict(set)
rows = []
for file in files:
user = file.split("/")[-2]
users[user].add(file)
rows.append({
"image": file,
"id": user
})
df = pd.DataFrame(rows)
positives = []
negatives = []
for user, files in users.items():
if len(files) <= 1:
continue
        samples = random.sample(list(files), 2)
positives.append({
"image1": samples[0],
"image2": samples[1],
"id1": user,
"id2": user,
"label": 1
})
user_ids = list(users.keys())
for i in range(0, len(user_ids), 2):
if i == len(user_ids) - 1:
continue
id1, id2 = user_ids[i], user_ids[i + 1]
files1, files2 = users[id1], users[id2]
if len(files1) < 2 or len(files2) < 2:
break
        samples1, samples2 = random.sample(list(files1), 2), random.sample(list(files2), 2)
for j in range(2):
negatives.append({
"image1": samples1[j],
"image2": samples2[j],
"id1": id1,
"id2": id2,
"label": -1
})
test_set = pd.DataFrame(positives + negatives)
return df, test_set
# trainset, testset = casia("train/")
# trainset.to_csv("train.csv", index=False)
# testset.to_csv("train_eval.csv", index=False)
for file in glob("dataset/validation/**/*.png", recursive=True):
tokens = file.split("/")
filename = tokens[-1]
id = tokens[-3]
dst = f"mfeval/{id}/{filename}"
os.makedirs(os.path.abspath(os.path.dirname(dst)), exist_ok=True)
    shutil.copyfile(file, dst)
test_big.py
import os
from load_data import load_batch, load_data_names, load_batch_from_names, load_batch_from_names_random
from my_model import get_eye_tracker_model
import numpy as np
from keras.models import load_model
from keras.optimizers import SGD, adam
def generator(data, batch_size, img_cols, img_rows, img_ch):
    while True:
        for it in list(range(0, data[0].shape[0], batch_size)):
            x, y = load_batch([l[it:it + batch_size] for l in data], img_cols, img_rows, img_ch)
            yield x, y
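# Usage sketch (illustrative addition): the generator above is written for Keras'
# fit_generator; `model` and `train_data` are placeholders for objects built
# elsewhere in a training script.
# model.fit_generator(generator(train_data, 64, 128, 128, 3), steps_per_epoch=100)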
def test_big(args):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.dev
names_path = r"C:\Users\Aliab\PycharmProjects\data\test"
print("Names to test: {}".format(names_path))
dataset_path = r"D:\GazeCapture"
print("Dataset: {}".format(names_path))
weights_path = "weight_vgg.hdf5"
print("Weights: {}".format(weights_path))
# image parameter
img_cols = 128
img_rows = 128
img_ch = 3
# test parameter
batch_size = 64
chunk_size = 500
# model
model = get_eye_tracker_model(img_cols, img_rows, img_ch)
# model summary
model.summary()
# weights
print("Loading weights...")
model = load_model(weights_path)
model.load_weights(weights_path)
# data
test_names = load_data_names(names_path)
# limit amount of testing data
# test_names = test_names[:1000]
# results
err_x = []
err_y = []
print("Loading testing data...")
for it in list(range(0, len(test_names), chunk_size)):
x, y = load_batch_from_names_random(test_names[it:it + chunk_size], dataset_path, batch_size, img_cols, img_rows, img_ch)
# x, y = load_batch_from_names(test_names[it:it + chunk_size], dataset_path, img_ch, img_cols, img_rows)
predictions = model.predict(x=x, batch_size=batch_size, verbose=1)
# print and analyze predictions
for i, prediction in enumerate(predictions):
print("PR: {} {}".format(prediction[0], prediction[1]))
print("GT: {} {} \n".format(y[i][0], y[i][1]))
err_x.append(abs(prediction[0] - y[i][0]))
err_y.append(abs(prediction[1] - y[i][1]))
# mean absolute error
mae_x = np.mean(err_x)
mae_y = np.mean(err_y)
# standard deviation
std_x = np.std(err_x)
std_y = np.std(err_y)
# final results
print("MAE: {} {} ( samples)".format(mae_x, mae_y))
print("STD: {} {} ( samples)".format(std_x, std_y))
if __name__ == '__main__':
    # Build the args namespace test_big expects (it reads args.dev above).
    import argparse
    cli = argparse.ArgumentParser()
    cli.add_argument("--dev", default="0")
    test_big(cli.parse_args())
Gallery.js
import React, { Component } from 'react'
import { getIpfsGateway } from 'utils/config'
class Gallery extends Component {
state = { active: 0 }
render() {
const { pics } = this.props
const active = pics[this.state.active]
const ipfsGateway = getIpfsGateway()
if (!active) return null
return (
<div className="gallery">
<div
className="main-pic"
style={{
backgroundImage: `url(${ipfsGateway}/${active.url.replace(
':/',
''
)})`
}}
/>
{pics.length === 1 ? null : (
<div className="thumbnails">
{pics.map((m, idx) => (
<img
key={idx}
onClick={() => this.setState({ active: idx })}
src={`${ipfsGateway}/${m.url.replace(':/', '')}`}
className={this.state.active === idx ? 'active' : ''}
/>
))}
</div>
)}
</div>
)
}
}
export default Gallery
require('react-styl')(`
.gallery
.main-pic
height: 200px
background-size: contain
background-repeat: no-repeat
background-position: center
margin-bottom: 2px
.thumbnails
overflow-x: auto
width: 100%
white-space: nowrap
img
max-height: 80px
cursor: pointer
border-width: 2px
border-style: solid
border-color: #fff
opacity: 0.75
&:hover
opacity: 1
&.active
border-color: #ff6
opacity: 1
`)
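// Usage sketch (illustrative addition): pics carry IPFS-style URLs that render()
// resolves against the configured gateway; the hashes below are placeholders.
// <Gallery pics={[{ url: 'ipfs://QmHash1' }, { url: 'ipfs://QmHash2' }]} />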
index.js
import React from "react";
const dom = (eq) => ({
type: "math",
subtype: "summationSymbol",
children: [{ text: "\u00b7" }],
});
const Element = (attributes, children) => {
return <span {...attributes}>{children}</span>;
};
const icon = (attributes, children) => {
  return <span {...attributes}> ·{children}</span>;
};
export default { slateDOM: dom, MathElement: Element, Icon: icon };