file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
logStream.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package cloudwatch
import (
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/go/pulumi"
)
// Provides a CloudWatch Log Stream resource.
//
// > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/cloudwatch_log_stream.html.markdown.
type LogStream struct {
s *pulumi.ResourceState
}
// NewLogStream registers a new resource with the given unique name, arguments, and options.
func NewLogStream(ctx *pulumi.Context,
name string, args *LogStreamArgs, opts ...pulumi.ResourceOpt) (*LogStream, error) |
// GetLogStream gets an existing LogStream resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
// GetLogStream gets an existing LogStream resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetLogStream(ctx *pulumi.Context,
	name string, id pulumi.ID, state *LogStreamState, opts ...pulumi.ResourceOpt) (*LogStream, error) {
	// Seed the lookup inputs from the caller-supplied state, when present.
	inputs := map[string]interface{}{}
	if state != nil {
		inputs["arn"] = state.Arn
		inputs["logGroupName"] = state.LogGroupName
		inputs["name"] = state.Name
	}
	resourceState, err := ctx.ReadResource("aws:cloudwatch/logStream:LogStream", name, id, inputs, opts...)
	if err != nil {
		return nil, err
	}
	return &LogStream{s: resourceState}, nil
}
// URN is this resource's unique name assigned by Pulumi.
func (r *LogStream) URN() *pulumi.URNOutput {
	return r.s.URN()
}

// ID is this resource's unique identifier assigned by its provider.
func (r *LogStream) ID() *pulumi.IDOutput {
	return r.s.ID()
}

// Arn returns the Amazon Resource Name (ARN) specifying the log stream.
func (r *LogStream) Arn() *pulumi.StringOutput {
	return (*pulumi.StringOutput)(r.s.State["arn"])
}

// LogGroupName returns the name of the log group under which the log stream is to be created.
func (r *LogStream) LogGroupName() *pulumi.StringOutput {
	return (*pulumi.StringOutput)(r.s.State["logGroupName"])
}

// Name returns the name of the log stream. Must not be longer than 512 characters and must not contain `:`.
func (r *LogStream) Name() *pulumi.StringOutput {
	return (*pulumi.StringOutput)(r.s.State["name"])
}
// LogStreamState contains input properties used for looking up and filtering
// LogStream resources.
type LogStreamState struct {
	// The Amazon Resource Name (ARN) specifying the log stream.
	Arn interface{}
	// The name of the log group under which the log stream is to be created.
	LogGroupName interface{}
	// The name of the log stream. Must not be longer than 512 characters and must not contain `:`
	Name interface{}
}

// LogStreamArgs is the set of arguments for constructing a LogStream resource.
type LogStreamArgs struct {
	// The name of the log group under which the log stream is to be created.
	LogGroupName interface{}
	// The name of the log stream. Must not be longer than 512 characters and must not contain `:`
	Name interface{}
}
| {
if args == nil || args.LogGroupName == nil {
return nil, errors.New("missing required argument 'LogGroupName'")
}
inputs := make(map[string]interface{})
if args == nil {
inputs["logGroupName"] = nil
inputs["name"] = nil
} else {
inputs["logGroupName"] = args.LogGroupName
inputs["name"] = args.Name
}
inputs["arn"] = nil
s, err := ctx.RegisterResource("aws:cloudwatch/logStream:LogStream", name, true, inputs, opts...)
if err != nil {
return nil, err
}
return &LogStream{s: s}, nil
} |
main.rs | pub use raylib::prelude::*;
pub mod example;
type SampleOut = Box<dyn for<'a> FnMut(&'a mut RaylibHandle, &'a RaylibThread) -> ()>;
type Sample = fn(&mut RaylibHandle, &RaylibThread) -> SampleOut;
use std::cell::RefCell;
thread_local! (static APP: RefCell<Option<Box<dyn FnMut() -> bool>>> = RefCell::new(None));
fn main() {
// Set the emscripten main loop before setting up raylib so that raylib has something
// to configure
// #[cfg(target_arch = "wasm32")]
// unsafe {
// wasm::emscripten_set_main_loop(wasm::_nothing_wasm, 0, 1);
// }
let title = "Showcase";
let screen_width = 800;
let screen_height = 640;
let (mut rl, thread) = raylib::init()
.size(screen_width, screen_height)
.title(title)
.vsync()
.msaa_4x()
.build();
rl.set_exit_key(None);
let samples: Vec<(&std::ffi::CStr, Sample)> = vec![
(rstr!("Core2D Camera"), example::core::core_2d_camera::run),
(
rstr!("Core2D Camera Platformer"),
example::core::core_2d_camera_platformer::run,
),
(
rstr!("raygui - controls test suite"),
example::controls_test_suite::controls_test_suite::run,
),
(
rstr!("raylib [models] example - pbr material"),
example::models::models_material_pbr::run,
),
(
rstr!("rlgl standalone"),
example::others::rlgl_standalone::run,
),
(
rstr!("raylib [textures] example - bunnymark"),
example::textures::textures_bunnymark::run,
),
(
rstr!("raylib [models] example - model animation"),
example::models::models_animation::run,
),
(
rstr!("raylib [core] example - scissor test"),
example::core::core_scissor_test::run,
),
(
rstr!("raylib [audio] example - music playing (streaming)"),
example::audio::audio_music_stream::run,
),
(
rstr!("raylib [shaders] example - postprocessing shader"),
example::shaders::shaders_postprocessing::run,
),
(
rstr!("raylib [texture] example - texture rectangle"),
example::textures::textures_rectangle::run,
),
(
rstr!("raylib [textures] example - mouse painting"),
example::textures::textures_mouse_painting::run,
),
];
let mut sample = None;
let mut list_view_active = -1;
let mut list_view_focus = -1;
let mut list_view_scroll_index = -1;
let box_length = (50 * samples.len() as i32).min(500);
let y_margin = (screen_height - box_length) / 2;
let frame: Box<dyn FnMut() -> bool> = Box::new(move || {
match &mut sample {
None => {
let mut to_run = None;
{
let mut d = rl.begin_drawing(&thread);
d.clear_background(Color::WHITE);
let list: Vec<_> = samples.iter().map(|(s, _)| *s).collect();
list_view_active = d.gui_list_view_ex(
rrect(200.0, y_margin, 400, box_length),
list.as_slice(),
&mut list_view_focus,
&mut list_view_scroll_index,
list_view_active,
);
if list_view_active >= 0 {
to_run.replace(samples[list_view_active as usize].1);
}
}
match to_run {
Some(run) => sample = Some(run(&mut rl, &thread)),
_ => {}
}
}
Some(ref mut run) => {
(*run)(&mut rl, &thread);
if rl.is_key_down(raylib::consts::KeyboardKey::KEY_BACKSPACE) {
sample = None;
rl.set_window_size(screen_width, screen_height);
rl.set_window_title(&thread, title);
list_view_active = -1;
}
}
};
return rl.window_should_close();
});
APP.with(|app| {
app.borrow_mut().replace(frame);
});
// absolutely NONE of this is necessary. You could use a while !update() {} loop in
// wasm without any problems as long as you compile with ASYNCIFY.
// This shows you how to do it using emscripten_set_main_loop.
#[cfg(not(target_arch = "wasm32"))]
{
while !update() {}
}
#[cfg(target_arch = "wasm32")]
unsafe {
wasm::emscripten_set_main_loop(wasm::_update_wasm, 0, 1);
}
}
/// Runs one frame of the registered app closure and returns its result
/// (the closure reports whether the window should close). Returns `false`
/// when no closure has been registered yet.
fn update() -> bool {
    APP.with(|cell| {
        let mut slot = cell.borrow_mut();
        if let Some(frame) = slot.as_mut() {
            frame()
        } else {
            false
        }
    })
}
#[cfg(target_arch = "wasm32")]
#[allow(dead_code)]
mod wasm {
use std::os::raw::{c_int, c_uchar};
#[allow(non_camel_case_types)]
type em_callback_func = unsafe extern "C" fn();
extern "C" {
// This extern is built in by Emscripten.
pub fn emscripten_sample_gamepad_data();
pub fn emscripten_run_script_int(x: *const c_uchar) -> c_int;
pub fn emscripten_cancel_main_loop();
pub fn emscripten_set_main_loop(
func: em_callback_func,
fps: c_int,
simulate_infinite_loop: c_int,
);
}
pub extern "C" fn _update_wasm() {
super::update();
}
pub extern "C" fn _nothing_wasm() |
}
| {} |
tasks.py | """
Definitions of tasks executed by Celery
"""
import logging
from web_app.extensions import celery
from web_app.extensions import db
from web_app.models.example_table import ExampleTable
from web_app.scheduled_tasks.scheduled_task import example_scheduled_task
@celery.task(name="healthcheck_task")
def healthcheck_task():
    """Healthcheck task: builds a query against ExampleTable and logs it.

    NOTE(review): ``db.session.query(ExampleTable)`` returns a lazy Query
    object; logging it emits the generated SQL, it does not execute the
    query. If the intent is to verify database connectivity, the query
    would need to be executed (e.g. ``.count()`` or ``.first()``) -- confirm.
    """
    i = db.session.query(ExampleTable)
    logging.info(i)
@celery.task(name='example_task_scheduled')
def example_task_scheduled():
    """Periodic task: logs an invocation message and delegates to
    ``example_scheduled_task``.

    (The previous docstring mentioned TriageIssue state, which nothing in
    this function references directly; the actual work happens inside
    ``example_scheduled_task``.)
    """
    logging.info('Update triage issues invoked')
    example_scheduled_task()
@celery.task(name='make_sure_cron_works')
def test_task():
| """Helper task to make sure than scheduled tasks work as expected"""
logging.debug('Cron works as expected') |
|
csv_points_to_vector.rs | /*
This tool is part of the WhiteboxTools geospatial analysis library.
Authors: Prof. John Lindsay
Created: 07/08/2019
Last Modified: 28/01/2020
License: MIT
*/
use crate::spatial_ref_system::esri_wkt_from_epsg;
use crate::tools::*;
use crate::vector::{AttributeField, FieldData, FieldDataType, ShapeType, Shapefile};
use std::env;
use std::fs::File;
use std::io::prelude::*;
use std::io::{BufReader, Error, ErrorKind};
use std::path;
use std::{f64, i32};
/// This tool can be used to import a series of points contained within a comma-separated values
/// (*.csv) file (`--input`) into a vector shapefile of a POINT ShapeType. The input file must be an ASCII text
/// file with a .csv extension. The tool will automatically detect the field data type; for numeric
/// fields, it will also determine the appropriate length and precision. The user must specify the
/// x-coordinate (`--xfield`) and y-coordinate (`--yfield`) fields. All fields are imported as
/// attributes in the output (`--output`) vector file. The tool assumes that the first line of the file is a header line from which field
/// names are retrieved.
///
/// # See Also
/// `MergeTableWithCsv`, `ExportTableToCsv`
pub struct CsvPointsToVector {
    /// Tool name reported to the framework.
    name: String,
    /// One-line description of what the tool does.
    description: String,
    /// Toolbox (category) this tool belongs to.
    toolbox: String,
    /// Command-line parameter definitions.
    parameters: Vec<ToolParameter>,
    /// Example command line shown in help output.
    example_usage: String,
}
impl CsvPointsToVector {
    /// Public constructor: assembles the tool's metadata, command-line
    /// parameter definitions, and an example usage string.
    pub fn new() -> CsvPointsToVector {
        let name = "CsvPointsToVector".to_string();
        let toolbox = "Data Tools".to_string();
        let description = "Converts a CSV text file to vector points.".to_string();

        // Declare the parameters accepted by this tool.
        let mut parameters = vec![];
        parameters.push(ToolParameter {
            name: "Input CSV File".to_owned(),
            flags: vec!["-i".to_owned(), "--input".to_owned()],
            description: "Input CSV file (i.e. source of data to be imported).".to_owned(),
            parameter_type: ParameterType::ExistingFile(ParameterFileType::Csv),
            default_value: None,
            optional: false,
        });
        parameters.push(ToolParameter {
            name: "Output Vector File".to_owned(),
            flags: vec!["-o".to_owned(), "--output".to_owned()],
            description: "Output vector file.".to_owned(),
            parameter_type: ParameterType::NewFile(ParameterFileType::Vector(
                VectorGeometryType::Any,
            )),
            default_value: None,
            optional: false,
        });
        parameters.push(ToolParameter {
            name: "X Field Number (zero-based)".to_owned(),
            flags: vec!["--xfield".to_owned()],
            description: "X field number (e.g. 0 for first field).".to_owned(),
            parameter_type: ParameterType::Integer,
            default_value: Some("0".to_owned()),
            optional: true,
        });
        parameters.push(ToolParameter {
            name: "Y Field Number (zero-based)".to_owned(),
            flags: vec!["--yfield".to_owned()],
            description: "Y field number (e.g. 1 for second field).".to_owned(),
            parameter_type: ParameterType::Integer,
            default_value: Some("1".to_owned()),
            optional: true,
        });
        parameters.push(ToolParameter {
            name: "EPSG Projection".to_owned(),
            flags: vec!["--epsg".to_owned()],
            description: "EPSG projection (e.g. 2958).".to_owned(),
            parameter_type: ParameterType::Integer,
            default_value: None,
            optional: true,
        });

        // Build an example command line, deriving this executable's short
        // name and substituting the platform's path separator for '*'.
        let sep: String = path::MAIN_SEPARATOR.to_string();
        let p = format!("{}", env::current_dir().unwrap().display());
        let e = format!("{}", env::current_exe().unwrap().display());
        let mut short_exe = e
            .replace(&p, "")
            .replace(".exe", "")
            .replace(".", "")
            .replace(&sep, "");
        if e.contains(".exe") {
            short_exe += ".exe";
        }
        let usage = format!(
            ">>.*{0} -r={1} -v --wd=\"*path*to*data*\" -i=points.csv -o=points.shp --xfield=0 --yfield=1 --epsg=4326",
            short_exe, name
        ).replace("*", &sep);
        CsvPointsToVector {
            name: name,
            description: description,
            toolbox: toolbox,
            parameters: parameters,
            example_usage: usage,
        }
    }
}
impl WhiteboxTool for CsvPointsToVector {
fn get_source_file(&self) -> String {
String::from(file!())
}
fn get_tool_name(&self) -> String {
self.name.clone()
}
fn get_tool_description(&self) -> String |
fn get_tool_parameters(&self) -> String {
match serde_json::to_string(&self.parameters) {
Ok(json_str) => return format!("{{\"parameters\":{}}}", json_str),
Err(err) => return format!("{:?}", err),
}
}
fn get_example_usage(&self) -> String {
self.example_usage.clone()
}
fn get_toolbox(&self) -> String {
self.toolbox.clone()
}
fn run<'a>(
&self,
args: Vec<String>,
working_directory: &'a str,
verbose: bool,
) -> Result<(), Error> {
let mut input_file = String::new();
let mut output_file = String::new();
// let mut field_definitions = String::new();
let mut x_field = 0;
let mut y_field = 1;
let mut epsg = 0u16;
let mut projection_set = false;
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
input_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-o" || flag_val == "-output" {
output_file = if keyval {
vec[1].to_string()
} else {
args[i + 1].to_string()
};
} else if flag_val == "-xfield" {
x_field = if keyval {
vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val)) as usize
} else {
args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val)) as usize
};
} else if flag_val == "-yfield" {
y_field = if keyval {
vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val)) as usize
} else {
args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val)) as usize
};
} else if flag_val == "-epsg" {
epsg = if keyval {
vec[1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val)) as u16
} else {
args[i + 1]
.to_string()
.parse::<f32>()
.expect(&format!("Error parsing {}", flag_val)) as u16
};
projection_set = true;
}
}
if verbose {
println!("***************{}", "*".repeat(self.get_tool_name().len()));
println!("* Welcome to {} *", self.get_tool_name());
println!("***************{}", "*".repeat(self.get_tool_name().len()));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
// File strings need a full directory
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
let start = Instant::now();
if verbose {
println!("Reading data...")
};
// read in the CSV file
let mut data = vec![];
let f = match File::open(input_file.clone()) {
Ok(v) => v,
Err(_) => {
return Err(Error::new(
ErrorKind::InvalidInput,
"Error opening the CSV file.",
));
}
};
let f = BufReader::new(f);
let mut csv_headers: Vec<String> = vec![];
let mut csv_num_fields = 0;
let mut field_types = vec![];
let mut record_num = 0;
let mut delimiter = ",";
let mut field_indices_to_append = vec![];
let mut field_lengths: Vec<u8> = vec![];
let mut field_precision: Vec<u8> = vec![];
for line in f.lines() {
let line_unwrapped = line.unwrap();
if !line_unwrapped.trim().is_empty() {
let mut line_split = line_unwrapped.split(delimiter);
let mut line_vec = line_split.collect::<Vec<&str>>();
if line_vec.len() == 1 {
delimiter = ";";
line_split = line_unwrapped.split(delimiter);
line_vec = line_split.collect::<Vec<&str>>();
if line_vec.len() == 1 {
delimiter = " ";
line_split = line_unwrapped.split(delimiter);
line_vec = line_split.collect::<Vec<&str>>();
}
}
if record_num == 0 {
csv_num_fields = line_vec.len();
for i in 0..csv_num_fields {
csv_headers.push(line_vec[i].trim().to_owned());
}
} else {
// is the record an appropriate length?
if line_vec.len() != csv_num_fields {
return Err(Error::new(
ErrorKind::InvalidInput,
"Not all records in the CSV file are the same length. Cannot read the table.",
));
}
if record_num == 1 {
// the first data record
for a in 0..csv_num_fields {
if a == x_field || a == y_field {
field_types.push(FieldDataType::Real); // It has to be floating point data.
} else {
field_types.push(get_type(line_vec[a]));
}
field_indices_to_append.push(a);
}
field_lengths = vec![0u8; csv_num_fields];
field_precision = vec![0u8; csv_num_fields];
}
let mut imported_data: Vec<FieldData> = Vec::with_capacity(csv_num_fields);
for a in 0..csv_num_fields {
if line_vec[a].len() as u8 > field_lengths[a] {
field_lengths[a] = line_vec[a].len() as u8;
}
if a == x_field || a == y_field {
let prec = get_precision(line_vec[a]);
if prec > field_precision[a] {
field_precision[a] = prec;
}
imported_data
.push(FieldData::Real(line_vec[a].trim().parse::<f64>().unwrap()))
} else {
match field_types[a] {
FieldDataType::Int => imported_data.push(FieldData::Int(
line_vec[a].trim().parse::<i32>().unwrap(),
)),
FieldDataType::Real => {
let prec = get_precision(line_vec[a]);
if prec > field_precision[a] {
field_precision[a] = prec;
}
imported_data.push(FieldData::Real(
line_vec[a].trim().parse::<f64>().unwrap(),
))
}
FieldDataType::Bool => imported_data.push(FieldData::Bool(
line_vec[a].trim().parse::<bool>().unwrap(),
)),
FieldDataType::Text => imported_data
.push(FieldData::Text(line_vec[a].trim().to_string())),
FieldDataType::Date => imported_data
.push(FieldData::Text(line_vec[a].trim().to_string())),
}
}
}
data.push(imported_data);
}
}
record_num += 1;
}
// make sure that the x and y fields are numeric
if field_types[x_field] != FieldDataType::Real
|| field_types[y_field] != FieldDataType::Real
{
return Err(Error::new(
ErrorKind::InvalidInput,
"Either the x or y fields, or both, do not contain floating-point numerical data.",
));
}
// create output file
let mut output = Shapefile::new(&output_file, ShapeType::Point)?;
if projection_set {
// set the projection information
output.projection = esri_wkt_from_epsg(epsg.clone());
}
// add the attributes
for a in 0..csv_num_fields {
output.attributes.add_field(&AttributeField::new(
&csv_headers[a],
field_types[a].clone(),
field_lengths[a],
field_precision[a],
));
}
// print the attribute data
let (mut x, mut y): (f64, f64);
let mut rec_num = 1i32;
for record_num in 0..data.len() {
// geometries
x = match data[record_num][x_field] {
FieldData::Real(v) => v,
_ => 0f64,
};
y = match data[record_num][y_field] {
FieldData::Real(v) => v,
_ => 0f64,
};
output.add_point_record(x, y);
// attributes
rec_num += 1;
output
.attributes
.add_record(data[record_num].clone(), false);
if verbose {
progress = (100.0_f64 * (rec_num + 1) as f64 / data.len() as f64) as usize;
if progress != old_progress {
println!("Progress: {}%", progress);
old_progress = progress;
}
}
}
if verbose {
println!("Saving data...")
};
let _ = match output.write() {
Ok(_) => {
if verbose {
println!("Output file written")
}
}
Err(e) => return Err(e),
};
let elapsed_time = get_formatted_elapsed_time(start);
if verbose {
println!("{}", &format!("Elapsed Time: {}", elapsed_time));
}
Ok(())
}
}
/// Infers the attribute data type of a raw CSV cell value.
///
/// Checks, in order: integer, real, boolean; anything else is text.
///
/// Fixes over the previous version:
/// - `parse::<i32>().unwrap_or(i32::MIN) != i32::MIN` misclassified a cell
///   that legitimately holds `i32::MIN` ("-2147483648") as non-integer;
///   likewise `unwrap_or(f64::INFINITY)` misclassified infinite values.
///   Using `is_ok()` avoids the sentinel comparison entirely.
/// - The old `.contains(".0")` branch was unreachable: a string containing
///   '.' never parses as `i32`, so decimal values always reach the f64 check.
fn get_type(s: &str) -> FieldDataType {
    let trimmed = s.trim();
    if trimmed.parse::<i32>().is_ok() {
        return FieldDataType::Int;
    }
    if trimmed.parse::<f64>().is_ok() {
        return FieldDataType::Real;
    }
    if trimmed.to_lowercase().parse::<bool>().is_ok() {
        return FieldDataType::Bool;
    }
    // There's no easy way to parse data type strings; fall back to text.
    FieldDataType::Text
}
/// Returns the number of characters following the decimal point in `s`,
/// or 0 when `s` contains no '.'.
fn get_precision(s: &str) -> u8 {
    if let Some(dot) = s.chars().position(|c| c == '.') {
        (s.len() - dot - 1) as u8
    } else {
        0u8
    }
}
| {
self.description.clone()
} |
api_op_ListRegexPatternSets.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package waf
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/waf/types"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// This is AWS WAF Classic documentation. For more information, see AWS WAF Classic
// (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
// in the developer guide. For the latest version of AWS WAF, use the AWS WAFV2 API
// and see the AWS WAF Developer Guide
// (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). With
// the latest version, AWS WAF has a single set of endpoints for regional and
// global use. Returns an array of RegexPatternSetSummary objects.
// ListRegexPatternSets returns an array of RegexPatternSetSummary objects.
//
// This is AWS WAF Classic documentation. For more information, see AWS WAF Classic
// (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html)
// in the developer guide. For the latest version of AWS WAF, use the AWS WAFV2 API
// and see the AWS WAF Developer Guide
// (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). With
// the latest version, AWS WAF has a single set of endpoints for regional and
// global use.
func (c *Client) ListRegexPatternSets(ctx context.Context, params *ListRegexPatternSetsInput, optFns ...func(*Options)) (*ListRegexPatternSetsOutput, error) {
	// Substitute an empty input so the operation can be invoked with nil params.
	if params == nil {
		params = &ListRegexPatternSetsInput{}
	}
	result, metadata, err := c.invokeOperation(ctx, "ListRegexPatternSets", params, optFns, addOperationListRegexPatternSetsMiddlewares)
	if err != nil {
		return nil, err
	}
	out := result.(*ListRegexPatternSetsOutput)
	out.ResultMetadata = metadata
	return out, nil
}
// ListRegexPatternSetsInput is the request payload for the
// ListRegexPatternSets operation.
type ListRegexPatternSetsInput struct {
	// Specifies the number of RegexPatternSet objects that you want AWS WAF to return
	// for this request. If you have more RegexPatternSet objects than the number you
	// specify for Limit, the response includes a NextMarker value that you can use to
	// get another batch of RegexPatternSet objects.
	Limit int32
	// If you specify a value for Limit and you have more RegexPatternSet objects than
	// the value of Limit, AWS WAF returns a NextMarker value in the response that
	// allows you to list another group of RegexPatternSet objects. For the second and
	// subsequent ListRegexPatternSets requests, specify the value of NextMarker from
	// the previous response to get information about another batch of RegexPatternSet
	// objects.
	NextMarker *string
}

// ListRegexPatternSetsOutput is the response payload for the
// ListRegexPatternSets operation.
type ListRegexPatternSetsOutput struct {
	// If you have more RegexPatternSet objects than the number that you specified for
	// Limit in the request, the response includes a NextMarker value. To list more
	// RegexPatternSet objects, submit another ListRegexPatternSets request, and
	// specify the NextMarker value from the response in the NextMarker value in the
	// next request.
	NextMarker *string
	// An array of RegexPatternSetSummary objects.
	RegexPatternSets []types.RegexPatternSetSummary
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}
func addOperationListRegexPatternSetsMiddlewares(stack *middleware.Stack, options Options) (err error) |
// newServiceMetadataMiddleware_opListRegexPatternSets returns the middleware
// that registers service metadata (region, service ID, signing name, and
// operation name) for the ListRegexPatternSets operation.
func newServiceMetadataMiddleware_opListRegexPatternSets(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "waf",
		OperationName: "ListRegexPatternSets",
	}
}
| {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpListRegexPatternSets{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListRegexPatternSets{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListRegexPatternSets(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
} |
DescribeCdnDeletedDomainsRequest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class DescribeCdnDeletedDomainsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2018-05-10', 'DescribeCdnDeletedDomains')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def | (self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | get_PageSize |
index.tsx | import React from 'react'
import { ICharacter } from '../../../services/types'
import CardTitle from '../../atoms/CardTitle'
import ItemDescription from '../../atoms/ItemDescription' | color?: string
char: ICharacter
}
// Presentational card for a single character: renders name, status, species,
// gender and image inside a Container whose background is driven by `color`.
// NOTE(review): `testID` is destructured with a default but never attached to
// any element -- confirm whether it should be forwarded (e.g. data-testid).
const ItemCard: React.FC<IItemCard> = ({
  color,
  testID = 'ItemCard',
  char
}) => (
  <Container color={color}>
    <CardTitle>{char?.name}</CardTitle>
    <ItemDescription>{char?.status}</ItemDescription>
    <ItemDescription>{char?.species}</ItemDescription>
    <ItemDescription>{char?.gender}</ItemDescription>
    <Image alt={char?.name} src={char?.image} />
  </Container>
)
export default ItemCard | import { Container, Image } from './style'
export interface IItemCard {
testID?: string |
factory_test.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"sort"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
manualfake "k8s.io/client-go/rest/fake"
"k8s.io/client-go/restmapper"
testcore "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/categories"
"k8s.io/kubernetes/pkg/kubectl/resource"
)
// TestPortsForObject verifies that PortsForObject extracts the declared
// container ports of a pod as a set of port-number strings.
func TestPortsForObject(t *testing.T) {
	f := NewFactory(NewTestConfigFlags())
	// Fixture: a pod with a single container exposing container port 101.
	pod := &api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Ports: []api.ContainerPort{
						{
							ContainerPort: 101,
						},
					},
				},
			},
		},
	}
	expected := sets.NewString("101")
	ports, err := f.PortsForObject(pod)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Compare as sets so ordering of the returned slice does not matter.
	got := sets.NewString(ports...)
	if !expected.Equal(got) {
		t.Fatalf("Ports mismatch! Expected %v, got %v", expected, got)
	}
}
func TestProtocolsForObject(t *testing.T) {
f := NewFactory(NewTestConfigFlags())
pod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"},
Spec: api.PodSpec{
Containers: []api.Container{
{
Ports: []api.ContainerPort{
{
ContainerPort: 101,
Protocol: api.ProtocolTCP,
},
{
ContainerPort: 102,
Protocol: api.ProtocolUDP,
},
},
},
},
},
}
expected := sets.NewString("101/TCP", "102/UDP")
protocolsMap, err := f.ProtocolsForObject(pod)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
protocolsString := kubectl.MakeProtocols(protocolsMap)
protocolsStrings := strings.Split(protocolsString, ",")
got := sets.NewString(protocolsStrings...)
if !expected.Equal(got) {
t.Fatalf("Protocols mismatch! Expected %v, got %v", expected, got)
}
}
func TestLabelsForObject(t *testing.T) {
f := NewFactory(NewTestConfigFlags())
tests := []struct {
name string
object runtime.Object
expected string
err error
}{
{
name: "successful re-use of labels",
object: &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", Labels: map[string]string{"svc": "test"}},
TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "svc=test",
err: nil,
},
{
name: "empty labels",
object: &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test", Labels: map[string]string{}},
TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "",
err: nil,
},
{
name: "nil labels",
object: &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: "zen", Namespace: "test", Labels: nil},
TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "",
err: nil,
},
}
for _, test := range tests {
gotLabels, err := f.LabelsForObject(test.object)
if err != test.err {
t.Fatalf("%s: Error mismatch: Expected %v, got %v", test.name, test.err, err)
}
got := kubectl.MakeLabels(gotLabels)
if test.expected != got {
t.Fatalf("%s: Labels mismatch! Expected %s, got %s", test.name, test.expected, got)
}
}
}
func TestCanBeExposed(t *testing.T) |
// newPodList builds a PodList fixture containing count ready pods named
// pod-1..pod-N with staggered creation timestamps. When isUnready (resp.
// isUnhealthy) is a valid index, that pod is marked not-ready (resp. given
// a container status with a non-zero restart count).
func newPodList(count, isUnready, isUnhealthy int, labels map[string]string) *api.PodList {
	pods := make([]api.Pod, 0, count)
	for i := 0; i < count; i++ {
		pods = append(pods, api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:              fmt.Sprintf("pod-%d", i+1),
				Namespace:         metav1.NamespaceDefault,
				CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, i, 0, time.UTC),
				Labels:            labels,
			},
			Status: api.PodStatus{
				Conditions: []api.PodCondition{
					{Status: api.ConditionTrue, Type: api.PodReady},
				},
			},
		})
	}
	if isUnready >= 0 && isUnready < count {
		pods[isUnready].Status.Conditions[0].Status = api.ConditionFalse
	}
	if isUnhealthy >= 0 && isUnhealthy < count {
		pods[isUnhealthy].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 5}}
	}
	return &api.PodList{Items: pods}
}
func TestGetFirstPod(t *testing.T) {
labelSet := map[string]string{"test": "selector"}
tests := []struct {
name string
podList *api.PodList
watching []watch.Event
sortBy func([]*v1.Pod) sort.Interface
expected *api.Pod
expectedNum int
expectedErr bool
}{
{
name: "kubectl logs - two ready pods",
podList: newPodList(2, -1, -1, labelSet),
sortBy: func(pods []*v1.Pod) sort.Interface { return controller.ByLogging(pods) },
expected: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 2,
},
{
name: "kubectl logs - one unhealthy, one healthy",
podList: newPodList(2, -1, 1, labelSet),
sortBy: func(pods []*v1.Pod) sort.Interface { return controller.ByLogging(pods) },
expected: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-2",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
ContainerStatuses: []api.ContainerStatus{{RestartCount: 5}},
},
},
expectedNum: 2,
},
{
name: "kubectl attach - two ready pods",
podList: newPodList(2, -1, -1, labelSet),
sortBy: func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
expected: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 2,
},
{
name: "kubectl attach - wait for ready pod",
podList: newPodList(1, 1, -1, labelSet),
watching: []watch.Event{
{
Type: watch.Modified,
Object: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
},
},
sortBy: func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
expected: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 1,
},
}
for i := range tests {
test := tests[i]
fake := fake.NewSimpleClientset(test.podList)
if len(test.watching) > 0 {
watcher := watch.NewFake()
for _, event := range test.watching {
switch event.Type {
case watch.Added:
go watcher.Add(event.Object)
case watch.Modified:
go watcher.Modify(event.Object)
}
}
fake.PrependWatchReactor("pods", testcore.DefaultWatchReactor(watcher, nil))
}
selector := labels.Set(labelSet).AsSelector()
pod, numPods, err := GetFirstPod(fake.Core(), metav1.NamespaceDefault, selector.String(), 1*time.Minute, test.sortBy)
pod.Spec.SecurityContext = nil
if !test.expectedErr && err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
if test.expectedErr && err == nil {
t.Errorf("%s: expected an error", test.name)
continue
}
if test.expectedNum != numPods {
t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods)
continue
}
if !apiequality.Semantic.DeepEqual(test.expected, pod) {
t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod)
}
}
}
func TestMakePortsString(t *testing.T) {
tests := []struct {
ports []api.ServicePort
useNodePort bool
expectedOutput string
}{
{ports: nil, expectedOutput: ""},
{ports: []api.ServicePort{}, expectedOutput: ""},
{ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
},
expectedOutput: "tcp:80",
},
{ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 8080,
Protocol: "UDP",
},
{
Port: 9000,
Protocol: "TCP",
},
},
expectedOutput: "tcp:80,udp:8080,tcp:9000",
},
{ports: []api.ServicePort{
{
Port: 80,
NodePort: 9090,
Protocol: "TCP",
},
{
Port: 8080,
NodePort: 80,
Protocol: "UDP",
},
},
useNodePort: true,
expectedOutput: "tcp:9090,udp:80",
},
}
for _, test := range tests {
output := makePortsString(test.ports, test.useNodePort)
if output != test.expectedOutput {
t.Errorf("expected: %s, saw: %s.", test.expectedOutput, output)
}
}
}
func fakeClient() resource.FakeClientFunc {
return func(version schema.GroupVersion) (resource.RESTClient, error) {
return &manualfake.RESTClient{}, nil
}
}
func TestDiscoveryReplaceAliases(t *testing.T) {
tests := []struct {
name string
arg string
expected string
}{
{
name: "no-replacement",
arg: "service",
expected: "service",
},
{
name: "all-replacement",
arg: "all",
expected: "pods,replicationcontrollers,services,statefulsets.apps,horizontalpodautoscalers.autoscaling,jobs.batch,cronjobs.batch,daemonsets.extensions,deployments.extensions,replicasets.extensions",
},
{
name: "alias-in-comma-separated-arg",
arg: "all,secrets",
expected: "pods,replicationcontrollers,services,statefulsets.apps,horizontalpodautoscalers.autoscaling,jobs.batch,cronjobs.batch,daemonsets.extensions,deployments.extensions,replicasets.extensions,secrets",
},
}
ds := &fakeDiscoveryClient{}
mapper := restmapper.NewShortcutExpander(testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme), ds)
b := resource.NewFakeBuilder(fakeClient(), mapper, categories.LegacyCategoryExpander)
for _, test := range tests {
replaced := b.ReplaceAliases(test.arg)
if replaced != test.expected {
t.Errorf("%s: unexpected argument: expected %s, got %s", test.name, test.expected, replaced)
}
}
}
| {
factory := NewFactory(NewTestConfigFlags())
tests := []struct {
kind schema.GroupKind
expectErr bool
}{
{
kind: api.Kind("ReplicationController"),
expectErr: false,
},
{
kind: api.Kind("Node"),
expectErr: true,
},
}
for _, test := range tests {
err := factory.CanBeExposed(test.kind)
if test.expectErr && err == nil {
t.Error("unexpected non-error")
}
if !test.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
}
}
} |
client.py | from logging import disable
import os
import signal
import asyncio
import uuid
import aiohttp
import functools
from typing import List, Optional
from fastapi import FastAPI
import websockets
from opal_common.logger import logger, configure_logs
from opal_common.middleware import configure_middleware
from opal_common.config import opal_common_config
from opal_common.security.sslcontext import get_custom_ssl_context
from opal_common.authentication.verifier import JWTVerifier
from opal_common.authentication.deps import JWTAuthenticator
from opal_client.policy_store.api import init_policy_store_router
from opal_client.config import PolicyStoreTypes, opal_client_config
from opal_client.data.api import init_data_router
from opal_client.data.updater import DataUpdater
from opal_client.data.fetcher import DataFetcher
from opal_client.policy_store.base_policy_store_client import BasePolicyStoreClient
from opal_client.policy_store.policy_store_client_factory import PolicyStoreClientFactory
from opal_client.opa.runner import OpaRunner
from opal_client.opa.options import OpaServerOptions
from opal_client.policy.api import init_policy_router
from opal_client.policy.updater import PolicyUpdater
from opal_client.callbacks.register import CallbacksRegister
from opal_client.callbacks.api import init_callbacks_api
class OpalClient:
def __init__(
self,
policy_store_type:PolicyStoreTypes=None,
policy_store:BasePolicyStoreClient=None,
data_updater:DataUpdater=None,
data_topics: List[str] = None,
policy_updater:PolicyUpdater=None,
inline_opa_enabled:bool=None,
inline_opa_options:OpaServerOptions=None,
verifier: Optional[JWTVerifier] = None,
) -> None:
"""
Args:
policy_store_type (PolicyStoreTypes, optional): [description]. Defaults to POLICY_STORE_TYPE.
Internal components (for each pass None for default init, or False to disable):
policy_store (BasePolicyStoreClient, optional): The policy store client. Defaults to None.
data_updater (DataUpdater, optional): Defaults to None.
policy_updater (PolicyUpdater, optional): Defaults to None.
"""
# defaults
policy_store_type: PolicyStoreTypes = policy_store_type or opal_client_config.POLICY_STORE_TYPE
inline_opa_enabled: bool = inline_opa_enabled or opal_client_config.INLINE_OPA_ENABLED
inline_opa_options: OpaServerOptions = inline_opa_options or opal_client_config.INLINE_OPA_CONFIG
opal_client_identifier: str = opal_client_config.OPAL_CLIENT_STAT_ID or f"CLIENT_{uuid.uuid4().hex}"
# set logs
configure_logs()
# Init policy store client
self.policy_store_type: PolicyStoreTypes = policy_store_type
self.policy_store: BasePolicyStoreClient = policy_store or PolicyStoreClientFactory.create(policy_store_type)
# data fetcher
self.data_fetcher = DataFetcher()
# callbacks register
if hasattr(opal_client_config.DEFAULT_UPDATE_CALLBACKS, 'callbacks'):
default_callbacks = opal_client_config.DEFAULT_UPDATE_CALLBACKS.callbacks
else:
default_callbacks = []
self._callbacks_register = CallbacksRegister(default_callbacks)
# Init policy updater
if policy_updater is not None:
self.policy_updater = policy_updater
else:
self.policy_updater = PolicyUpdater(policy_store=self.policy_store, data_fetcher=self.data_fetcher, callbacks_register=self._callbacks_register, opal_client_id=opal_client_identifier)
# Data updating service
if opal_client_config.DATA_UPDATER_ENABLED:
if data_updater is not None:
self.data_updater = data_updater
else:
data_topics = data_topics if data_topics is not None else opal_client_config.DATA_TOPICS
self.data_updater = DataUpdater(policy_store=self.policy_store, data_topics=data_topics, data_fetcher=self.data_fetcher, callbacks_register=self._callbacks_register, opal_client_id=opal_client_identifier)
else:
self.data_updater = None
# Internal services
# Policy store
if self.policy_store_type == PolicyStoreTypes.OPA and inline_opa_enabled:
rehydration_callbacks = [
# refetches policy code (e.g: rego) and static data from server
functools.partial(self.policy_updater.update_policy, force_full_update=True),
]
if self.data_updater:
rehydration_callbacks.append(
functools.partial(self.data_updater.get_base_policy_data, data_fetch_reason="policy store rehydration")
)
self.opa_runner = OpaRunner.setup_opa_runner(options=inline_opa_options, rehydration_callbacks=rehydration_callbacks)
else:
self.opa_runner = False
custom_ssl_context = get_custom_ssl_context()
if opal_common_config.CLIENT_SELF_SIGNED_CERTIFICATES_ALLOWED and custom_ssl_context is not None:
logger.warning("OPAL client is configured to trust self-signed certificates")
if verifier is not None:
self.verifier = verifier
else:
self.verifier = JWTVerifier(
public_key=opal_common_config.AUTH_PUBLIC_KEY,
algorithm=opal_common_config.AUTH_JWT_ALGORITHM,
audience=opal_common_config.AUTH_JWT_AUDIENCE,
issuer=opal_common_config.AUTH_JWT_ISSUER,
)
if not self.verifier.enabled:
logger.info("API authentication disabled (public encryption key was not provided)")
# init fastapi app
self.app: FastAPI = self._init_fast_api_app()
def _init_fast_api_app(self):
"""
inits the fastapi app object
"""
app = FastAPI(
title="OPAL Client",
description="OPAL is an administration layer for Open Policy Agent (OPA), detecting changes" + \
" to both policy and data and pushing live updates to your agents. The opal client is" + \
" deployed alongside a policy-store (e.g: OPA), keeping it up-to-date, by connecting to" + \
" an opal-server and subscribing to pub/sub updates for policy and policy data changes.",
version="0.1.0"
)
configure_middleware(app)
self._configure_api_routes(app)
self._configure_lifecycle_callbacks(app)
return app
def _configure_api_routes(self, app: FastAPI):
"""
mounts the api routes on the app object
"""
authenticator = JWTAuthenticator(self.verifier)
# Init api routers with required dependencies
policy_router = init_policy_router(policy_updater=self.policy_updater)
data_router = init_data_router(data_updater=self.data_updater)
policy_store_router = init_policy_store_router(authenticator)
callbacks_router = init_callbacks_api(authenticator, self._callbacks_register)
# mount the api routes on the app object
app.include_router(policy_router, tags=["Policy Updater"])
app.include_router(data_router, tags=["Data Updater"])
app.include_router(policy_store_router, tags=["Policy Store"])
app.include_router(callbacks_router, tags=["Callbacks"])
# top level routes (i.e: healthchecks)
@app.get("/healthcheck", include_in_schema=False)
@app.get("/", include_in_schema=False)
def healthcheck():
return {"status": "ok"}
return app
def _configure_lifecycle_callbacks(self, app: FastAPI):
"""
registers callbacks on app startup and shutdown.
on app startup we launch our long running processes (async tasks)
on the event loop. on app shutdown we stop these long running tasks.
"""
@app.on_event("startup")
async def startup_event():
asyncio.create_task(self.start_client_background_tasks())
@app.on_event("shutdown")
async def shutdown_event():
await self.stop_client_background_tasks()
return app
async def start_client_background_tasks(self):
"""
Launch OPAL client long-running tasks:
- Policy Store runner (e.g: Opa Runner)
- Policy Updater
- Data Updater
If there is a policy store to run, we wait until its up before launching dependent tasks.
"""
if self.opa_runner:
# runs the policy store dependent tasks after policy store is up
self.opa_runner.register_opa_initial_start_callbacks([self.launch_policy_store_dependent_tasks])
async with self.opa_runner:
await self.opa_runner.wait_until_done()
else:
# we do not run the policy store in the same container
# therefore we can immediately launch dependent tasks
await self.launch_policy_store_dependent_tasks()
async def stop_client_background_tasks(self):
"""
stops all background tasks (called on shutdown event)
"""
logger.info("stopping background tasks...")
# stopping opa runner
if self.opa_runner:
await self.opa_runner.stop()
# stopping updater tasks (each updater runs a pub/sub client)
logger.info("trying to shutdown DataUpdater and PolicyUpdater gracefully...")
tasks: List[asyncio.Task] = []
if self.data_updater:
tasks.append(asyncio.create_task(self.data_updater.stop()))
if self.policy_updater:
tasks.append(asyncio.create_task(self.policy_updater.stop()))
try:
await asyncio.gather(*tasks)
except Exception:
logger.exception("exception while shutting down updaters")
async def launch_policy_store_dependent_tasks(self):
try:
await self.maybe_init_healthcheck_policy()
except Exception:
logger.critical("healthcheck policy enabled but could not be initialized!")
self._trigger_shutdown()
return
try:
for task in asyncio.as_completed([self.launch_policy_updater(), self.launch_data_updater()]):
await task
except websockets.exceptions.InvalidStatusCode as err:
logger.error("Failed to launch background task -- {err}", err=repr(err))
self._trigger_shutdown()
async def maybe_init_healthcheck_policy(self):
|
def _trigger_shutdown(self):
"""
this will send SIGTERM (Keyboard interrupt) to the worker, making uvicorn
send "lifespan.shutdown" event to Starlette via the ASGI lifespan interface.
Starlette will then trigger the @app.on_event("shutdown") callback, which
in our case (self.stop_client_background_tasks()) will gracefully shutdown
the background processes and only then will terminate the worker.
"""
logger.info("triggering shutdown with SIGTERM...")
os.kill(os.getpid(), signal.SIGTERM)
async def launch_policy_updater(self):
if self.policy_updater:
async with self.policy_updater:
await self.policy_updater.wait_until_done()
async def launch_data_updater(self):
if self.data_updater:
async with self.data_updater:
await self.data_updater.wait_until_done() | """
This function only runs if OPA_HEALTH_CHECK_POLICY_ENABLED is true.
Puts the healthcheck policy in opa cache and inits the transaction log used by the policy.
If any action fails, opal client will shutdown.
"""
if not opal_client_config.OPA_HEALTH_CHECK_POLICY_ENABLED:
return # skip
healthcheck_policy_relpath = opal_client_config.OPA_HEALTH_CHECK_POLICY_PATH
here = os.path.abspath(os.path.dirname(__file__))
healthcheck_policy_path = os.path.join(here, healthcheck_policy_relpath)
if not os.path.exists(healthcheck_policy_path):
logger.error("Critical: OPA health-check policy is enabled, but cannot find policy at {path}", path=healthcheck_policy_path)
raise ValueError("OPA health check policy not found!")
try:
healthcheck_policy_code = open(healthcheck_policy_path, 'r').read()
except IOError as err:
logger.error("Critical: Cannot read healthcheck policy: {err}", err=repr(err))
raise
try:
await self.policy_store.init_healthcheck_policy(
policy_id=healthcheck_policy_relpath,
policy_code=healthcheck_policy_code,
data_updater_enabled=opal_client_config.DATA_UPDATER_ENABLED
)
except aiohttp.ClientError as err:
logger.error("Failed to connect to OPA agent while init healthcheck policy -- {err}", err=repr(err))
raise |
bigqueue.go | package bigqueue
import (
"errors"
"sync"
"time"
)
var (
// ErrInvalidArenaSize is returned when persisted arena size
// doesn't match with desired arena size
ErrInvalidArenaSize = errors.New("mismatch in arena size")
)
// Queue provides an interface to big, fast and persistent queue
type Queue interface {
IsEmpty() bool
Flush() error
Close() error
Enqueue([]byte) error
EnqueueString(string) error
Dequeue() error
Peek() ([]byte, error)
PeekString() (string, error)
}
// MmapQueue implements Queue interface
type MmapQueue struct {
conf *bqConfig
index *queueIndex
am *arenaManager
// using atomic to update these below
mutOps *atomicInt64
flushChan chan struct{}
done chan struct{}
quit chan struct{}
// The order of locks: hLock > tLock > am.Lock
// protects head
hLock sync.RWMutex
// protects tail
tLock sync.RWMutex
}
// NewMmapQueue constructs a new persistent queue
func NewMmapQueue(dir string, opts ...Option) (Queue, error) {
complete := false
// setup configuration
conf := newConfig()
for _, opt := range opts {
if err := opt(conf); err != nil { | }
// create queue index
index, err := newQueueIndex(dir)
if err != nil {
return nil, err
}
defer func() {
if !complete {
_ = index.close()
}
}()
// create arena manager
am, err := newArenaManager(dir, conf, index)
if err != nil {
return nil, err
}
defer func() {
if !complete {
_ = am.close()
}
}()
// ensure that the arena size, if queue had existed,
// matches with the given arena size
existingSize := index.getArenaSize()
if existingSize == 0 {
index.putArenaSize(conf.arenaSize)
} else if existingSize != conf.arenaSize {
return nil, ErrInvalidArenaSize
}
bq := &MmapQueue{
conf: conf,
am: am,
index: index,
mutOps: newAtomicInt64(0),
flushChan: make(chan struct{}, 100),
done: make(chan struct{}),
quit: make(chan struct{}),
}
// setup background thread to flush arenas periodically
if err := bq.setupBgFlush(); err != nil {
return nil, err
}
complete = true
return bq, nil
}
// IsEmpty returns true when queue is empty
func (q *MmapQueue) IsEmpty() bool {
q.hLock.RLock()
defer q.hLock.RUnlock()
q.tLock.RLock()
defer q.tLock.RUnlock()
return q.isEmpty()
}
// Flush syncs the in memory content of bigqueue to disk
// A read lock ensures that there is no writer which is what we want
func (q *MmapQueue) Flush() error {
q.hLock.RLock()
defer q.hLock.RUnlock()
q.tLock.RLock()
defer q.tLock.RUnlock()
return q.flush()
}
// Close will close index and arena manager
func (q *MmapQueue) Close() error {
q.hLock.Lock()
defer q.hLock.Unlock()
q.tLock.Lock()
defer q.tLock.Unlock()
// wait for quitting the background routine
q.quit <- struct{}{}
<-q.done
var retErr error
if err := q.am.close(); err != nil {
retErr = err
}
if err := q.index.close(); err != nil {
retErr = err
}
return retErr
}
// isEmpty is not thread safe and should be called only after acquiring necessary locks
func (q *MmapQueue) isEmpty() bool {
headAid, headOffset := q.index.getHead()
tailAid, tailOffset := q.index.getTail()
return headAid == tailAid && headOffset == tailOffset
}
// flush is not thread safe and should be called only after acquiring necessary locks
func (q *MmapQueue) flush() error {
if err := q.am.flush(); err != nil {
return err
}
if err := q.index.flush(); err != nil {
return err
}
q.mutOps.store(0)
return nil
}
// setupBgFlush sets up background go routine to periodically flush arenas
func (q *MmapQueue) setupBgFlush() error {
t := &time.Timer{
C: make(chan time.Time),
}
if q.conf.flushPeriod != 0 {
t = time.NewTimer(time.Duration(q.conf.flushPeriod))
}
go func() {
for {
if q.conf.flushPeriod != 0 {
if !t.Stop() {
<-t.C
}
t.Reset(time.Duration(q.conf.flushPeriod))
}
select {
case <-q.quit:
defer func() { q.done <- struct{}{} }()
return
case <-q.flushChan:
if q.mutOps.load() >= q.conf.flushMutOps {
q.Flush()
}
case <-t.C:
q.Flush()
}
}
}()
return nil
} | return nil, err
} |
main.go | // Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// go_marshal is a code generation utility for automatically generating code to
// marshal go data structures to memory.
//
// This binary is typically run as part of the build process, and is invoked by
// the go_marshal bazel rule defined in defs.bzl.
//
// See README.md.
package main
import (
"flag"
"fmt"
"os"
"strings"
"gvisor.dev/gvisor/tools/go_marshal/gomarshal"
)
var (
pkg = flag.String("pkg", "", "output package")
output = flag.String("output", "", "output file")
outputTest = flag.String("output_test", "", "output file for tests")
imports = flag.String("imports", "", "comma-separated list of extra packages to import in generated code")
)
func | () {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s <input go src files>\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
if len(flag.Args()) == 0 {
flag.Usage()
os.Exit(1)
}
if *pkg == "" {
flag.Usage()
fmt.Fprint(os.Stderr, "Flag -pkg must be provided.\n")
os.Exit(1)
}
var extraImports []string
if len(*imports) > 0 {
// Note: strings.Split(s, sep) returns s if sep doesn't exist in s. Thus
// we check for an empty imports list to avoid emitting an empty string
// as an import.
extraImports = strings.Split(*imports, ",")
}
g, err := gomarshal.NewGenerator(flag.Args(), *output, *outputTest, *pkg, extraImports)
if err != nil {
panic(err)
}
if err := g.Run(); err != nil {
panic(err)
}
}
| main |
sprites.js | //#include <mud/frame.h>
#include <frame/Api.h>
#include <gfx-pbr/Api.h>
#include <gfx-obj/Api.h>
#include <xx_three/xx_three.h>
#include <stl/vector.hpp>
#define CLUSTERED 1
void xx_sprites(Shell app, var parent, Dockbar dockbar)
{
var viewer = two.ui.scene_viewer(panel);
two.ui.orbit_controller(viewer);
//camera = new THREE.PerspectiveCamera(60, width / height, 1, 2100);
//camera.position.z = 1500;
//scene.fog = new THREE.Fog(0x000000, 1500, 2100);
var overlay = two.ui.scene_viewer(parent);
//cameraOrtho = new THREE.OrthographicCamera(- width / 2, width / 2, height / 2, - height / 2, 1, 10);
//cameraOrtho.position.z = 10;
// create sprites
var amount = 200;
var radius = 500;
Texture sprites0 = app.gfx.textures.file('sprite0.png');
var material = app.gfx.materials.create('sprite0'); var m = material;
//m.program = SpriteMaterial;
//m.sprite.color = sprites0;
}); // new THREE.SpriteMaterial({ map: texture });
{
//
//var width = material.map.image.width;
//var height = material.map.image.height;
//
//spriteTL = new THREE.Sprite(material);
//spriteTL.center.set(0.0, 1.0);
//spriteTL.scale.set(width, height, 1);
//sceneOrtho.add(spriteTL);
//
//spriteTR = new THREE.Sprite(material);
//spriteTR.center.set(1.0, 1.0);
//spriteTR.scale.set(width, height, 1);
//sceneOrtho.add(spriteTR);
//
//spriteBL = new THREE.Sprite(material);
//spriteBL.center.set(0.0, 0.0);
//spriteBL.scale.set(width, height, 1);
//sceneOrtho.add(spriteBL);
//
//spriteBR = new THREE.Sprite(material);
//spriteBR.center.set(1.0, 0.0);
//spriteBR.scale.set(width, height, 1);
//sceneOrtho.add(spriteBR);
//
//spriteC = new THREE.Sprite(material);
//spriteC.center.set(0.5, 0.5);
//spriteC.scale.set(width, height, 1);
//sceneOrtho.add(spriteC);
//
//updateHUDSprites();
}
Texture mapB = app.gfx.textures.file('sprite1.png');
Texture mapC = app.gfx.textures.file('sprite2.png');
//group = new THREE.Group();
var materialC = app.gfx.materials.create('sprite0'); var m = material;
});
//new THREE.SpriteMaterial({ map: mapC, color : 0xffffff, fog : true });
var materialB = app.gfx.materials.create('sprite0'); var m = material;
});
//new THREE.SpriteMaterial({ map: mapB, color: 0xffffff, fog: true });
for(var a = 0; a < amount; a++)
{
var x = Math.random() - 0.5;
var y = Math.random() - 0.5;
var z = Math.random() - 0.5;
| //material = materialB.clone();
}
else {
//material = materialC.clone();
material.solid.colour = two.hsl(0.5 * Math.random(), 0.75, 0.5);
material.base.uv0_offset = { -0.5, -0.5 };
material.base.uv0_scale = { 2.0, 2.0 };
}
//var sprite = new THREE.Sprite(material);
//
//sprite.position.set(x, y, z);
//sprite.position.normalize();
//sprite.position.multiplyScalar(radius);
//
//group.add(sprite);
}
//scene.add(group);
function updateHUDSprites = []() {
//var width = window.innerWidth / 2;
//var height = window.innerHeight / 2;
//
//spriteTL.position.set(-width, height, 1); // top left
//spriteTR.position.set(width, height, 1); // top right
//spriteBL.position.set(-width, -height, 1); // bottom left
//spriteBR.position.set(width, -height, 1); // bottom right
//spriteC.position.set(0, 0, 1); // center
};
var time = app.gfx.time;
/*
for(var i = 0, l = group.children.length; i < l; i++) {
var sprite = group.children[i];
var material = sprite.material;
var scale = Math.sin(time + sprite.position.x * 0.01) * 0.3 + 1.0;
var imageWidth = 1;
var imageHeight = 1;
if(material.map material.map.image material.map.image.width) {
imageWidth = material.map.image.width;
imageHeight = material.map.image.height;
}
sprite.material.rotation += 0.1 * (i / l);
sprite.scale.set(scale * imageWidth, scale * imageHeight, 1.0);
if(material.map != = mapC) {
material.opacity = Math.sin(time + sprite.position.x * 0.01) * 0.4 + 0.6;
}
}
group.rotation.x = time * 0.5;
group.rotation.y = time * 0.75;
group.rotation.z = time * 1.0;
renderer.clear();
renderer.render(scene, camera);
renderer.clearDepth();
renderer.render(sceneOrtho, cameraOrtho);
*/
} | var material = materialB;
if(z < 0) {
|
problem_2_1.rs | use regex;
use std::fmt;
use std::str::FromStr;
use crate::util;
struct PasswordPattern {
lower_char_limit: usize,
upper_char_limit: usize,
char: char,
password: String,
}
lazy_static! {
static ref PASSWORD_PATTERN_REGEX: regex::Regex =
regex::Regex::new(r"(\d+)-(\d+) (.): (.*)").unwrap();
}
impl FromStr for PasswordPattern {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let password_captures = PASSWORD_PATTERN_REGEX
.captures_iter(s)
.nth(0)
.ok_or_else(|| "Regex failed to match")?;
fn parse<T: FromStr>(
captures: ®ex::Captures,
idx: usize,
name: &str,
) -> Result<T, String>
where
T::Err: fmt::Display,
| ;
let (lower_char_limit, upper_char_limit, char, password) = (
parse(&password_captures, 1, "lower_char_limit")?,
parse(&password_captures, 2, "upper_char_limit")?,
parse(&password_captures, 3, "char")?,
parse(&password_captures, 4, "password")?,
);
Ok(Self {
lower_char_limit,
upper_char_limit,
char,
password,
})
}
}
pub fn problem_2_1() -> String {
let passwords = util::read("input/problem_2_input.txt")
.lines()
.map(|f| f.parse::<PasswordPattern>().unwrap())
.collect::<Vec<_>>();
return passwords
.iter()
.filter(|password_pattern| {
let matching_chars = password_pattern
.password
.chars()
.filter(|password_char| *password_char == password_pattern.char)
.count();
matching_chars <= password_pattern.upper_char_limit
&& matching_chars >= password_pattern.lower_char_limit
})
.count()
.to_string();
}
| {
captures
.get(idx)
.ok_or_else(|| format!("{} not found", name))?
.as_str()
.parse::<T>()
.map_err(|err| format!("{} parse failed: {}", name, err))
} |
logging.py | """Logging and Profiling
"""
import time as time_module
import datetime
#from anndata import logging
from . import settings
_VERBOSITY_LEVELS_FROM_STRINGS = {
'error': 0,
'warn': 1,
'info': 2,
'hint': 3,
}
def info(*args, **kwargs):
return msg(*args, v='info', **kwargs)
def error(*args, **kwargs):
args = ('Error:',) + args
return msg(*args, v='error', **kwargs)
def warn(*args, **kwargs):
args = ('WARNING:',) + args
return msg(*args, v='warn', **kwargs)
def hint(*args, **kwargs):
return msg(*args, v='hint', **kwargs)
def _settings_verbosity_greater_or_equal_than(v):
if isinstance(settings.verbosity, str):
settings_v = _VERBOSITY_LEVELS_FROM_STRINGS[settings.verbosity]
else:
settings_v = settings.verbosity
return settings_v >= v
def msg(*msg, v=4, time=False, memory=False, reset=False, end='\n',
no_indent=False, t=None, m=None, r=None):
"""Write message to logging output.
Log output defaults to standard output but can be set to a file
by setting `sc.settings.log_file = 'mylogfile.txt'`.
v : {'error', 'warn', 'info', 'hint'} or int, (default: 4)
0/'error', 1/'warn', 2/'info', 3/'hint', 4, 5, 6...
time, t : bool, optional (default: False)
Print timing information; restart the clock.
memory, m : bool, optional (default: Faulse)
Print memory information.
reset, r : bool, optional (default: False)
Reset timing and memory measurement. Is automatically reset
when passing one of ``time`` or ``memory``.
end : str (default: '\n')
Same meaning as in builtin ``print()`` function.
no_indent : bool (default: False)
Do not indent for ``v >= 4``.
"""
# variable shortcuts
if t is not None: time = t
if m is not None: memory = m
if r is not None: reset = r
if isinstance(v, str):
v = _VERBOSITY_LEVELS_FROM_STRINGS[v]
if v == 3: # insert "--> " before hints
msg = ('-->',) + msg
if v >= 4 and not no_indent:
msg = (' ',) + msg
if _settings_verbosity_greater_or_equal_than(v):
if not time and not memory and len(msg) > 0:
_write_log(*msg, end=end)
if reset:
try:
settings._previous_memory_usage, _ = get_memory_usage()
except:
pass
settings._previous_time = time_module.time()
if time:
elapsed = get_passed_time()
msg = msg + ('({})'.format(_sec_to_str(elapsed)),)
_write_log(*msg, end=end)
if memory:
_write_log(format_memory_usage(get_memory_usage()),
msg='' if time else msg, end=end)
m = msg # backwards compat
def _write_log(*msg, end='\n'):
"""Write message to log output, ignoring the verbosity level.
This is the most basic function.
Parameters
----------
*msg :
One or more arguments to be formatted as string. Same behavior as print
function.
"""
from .settings import logfile
if logfile == '':
print(*msg, end=end)
else:
out = ''
for s in msg:
out += str(s) + ' '
with open(logfile, 'a') as f:
f.write(out + end)
def _sec_to_str(t):
"""Format time in seconds.
Parameters
----------
t : int
Time in seconds.
"""
from functools import reduce
return "%d:%02d:%02d.%02d" % \
reduce(lambda ll, b: divmod(ll[0], b) + ll[1:],
[(t*100,), 100, 60, 60])
#print_memory_usage = logging.print_memory_usage
#get_memory_usage = logging.get_memory_usage
def | ():
now = time_module.time()
elapsed = now - settings._previous_time
settings._previous_time = now
return elapsed
def print_version_and_date():
    """Log the running epiScanpy version together with the current date."""
    from . import __version__
    date_part = 'on {}.'.format(get_date_string())
    _write_log('Running epiScanpy', __version__, date_part)
# Modules whose versions may influence numerical results. Each entry is either
# an import name, or a tuple (import_name, distribution_name) when they differ.
_DEPENDENCIES_NUMERICS = [
    'anndata',  # anndata actually shouldn't, but as long as it's in development
    'numpy',
    'scipy',
    'pandas',
    ('sklearn', 'scikit-learn'),
    'statsmodels',
    ('igraph', 'python-igraph'),
    'louvain']
# Modules that only influence plots, not computed data.
_DEPENDENCIES_PLOTTING = ['matplotlib', 'seaborn']
def _print_versions_dependencies(dependencies):
# this is not the same as the requirements!
for mod in dependencies:
mod_name = mod[0] if isinstance(mod, tuple) else mod
mod_install = mod[1] if isinstance(mod, tuple) else mod
try:
imp = __import__(mod_name)
print('{}=={}'.format(mod_install, imp.__version__), end=' ')
except (ImportError, AttributeError):
pass
print()
def print_versions():
    """Versions that might influence the numerical results.
    Matplotlib and Seaborn are excluded from this.
    """
    # episcanpy itself is printed first, followed by the numerical deps.
    _print_versions_dependencies(['episcanpy'] + _DEPENDENCIES_NUMERICS)
def print_versions_dependencies_numerics():
    """Dependencies that might influence numerical results (computed data).
    """
    print('Dependencies:', end=' ')
    _print_versions_dependencies(_DEPENDENCIES_NUMERICS)
def print_versions_dependencies_plotting():
    """Dependencies that might influence plots (but not computed data).
    """
    print('Dependencies:', end=' ')
    _print_versions_dependencies(_DEPENDENCIES_PLOTTING)
def print_versions_dependencies_all():
    """All relevant dependencies.
    """
    _print_versions_dependencies(
        _DEPENDENCIES_NUMERICS + _DEPENDENCIES_PLOTTING)
def get_date_string():
    """Return the current local time formatted as ``YYYY-MM-DD HH:MM``."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")
| get_passed_time |
daemon_dest.go | package daemon
import (
"io"
"github.com/containers/image/docker/reference"
"github.com/containers/image/docker/tarfile"
"github.com/containers/image/types"
"github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
// daemonImageDestination streams an image to a Docker daemon: tarfile.Destination
// writes a docker-archive tar into writer, and imageLoadGoroutine feeds the
// other end of the pipe to the daemon's image-load API.
type daemonImageDestination struct {
	ref                daemonReference // destination; its ref must be a reference.NamedTagged
	mustMatchRuntimeOS bool            // false when a non-default daemon host is configured
	*tarfile.Destination // Implements most of types.ImageDestination
	// For talking to imageLoadGoroutine
	goroutineCancel context.CancelFunc
	statusChannel   <-chan error
	writer          *io.PipeWriter
	// Other state
	committed bool // writer has been closed
}
// newImageDestination returns a types.ImageDestination for the specified image reference.
func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
	if ref.ref == nil {
		return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
	}
	namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
	if !ok {
		return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
	}
	// Only a daemon reached at the default host is assumed to share our runtime OS.
	matchOS := !(ctx != nil && ctx.DockerDaemonHost != client.DefaultDockerHost)
	c, err := newDockerClient(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "Error initializing docker engine client")
	}
	reader, writer := io.Pipe()
	// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
	statusChannel := make(chan error, 1)
	loadCtx, cancel := context.WithCancel(context.Background())
	go imageLoadGoroutine(loadCtx, c, reader, statusChannel)
	return &daemonImageDestination{
		ref:                ref,
		mustMatchRuntimeOS: matchOS,
		Destination:        tarfile.NewDestination(writer, namedTaggedRef),
		goroutineCancel:    cancel,
		statusChannel:      statusChannel,
		writer:             writer,
		committed:          false,
	}, nil
}
// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) |
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// The value is computed once in newImageDestination, based on whether a
// non-default daemon host is configured in the SystemContext.
func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
	return d.mustMatchRuntimeOS
}
// Close removes resources associated with an initialized ImageDestination, if any.
// If Commit() has not succeeded, the write side of the pipe is closed with an
// error to abort the in-flight daemon load; afterwards the goroutine's context
// is canceled in either case.
func (d *daemonImageDestination) Close() error {
	if !d.committed {
		logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
		// In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
		// In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
		// https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
		// net/http version with native Context support in Go 1.7) do not always actually immediately cancel
		// the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
		// return early if the context is canceled without terminating the goroutine at all.
		// So we need this CloseWithError to terminate sending the HTTP request Body
		// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked"" without sending
		// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
		// Whether that works or not, closing the PipeWriter seems desirable in any case.
		d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
	}
	d.goroutineCancel()
	return nil
}
// Reference returns the reference used to set up this destination.
func (d *daemonImageDestination) Reference() types.ImageReference {
	return d.ref
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *daemonImageDestination) Commit() error {
	logrus.Debugf("docker-daemon: Closing tar stream")
	if err := d.Destination.Commit(); err != nil {
		return err
	}
	if err := d.writer.Close(); err != nil {
		return err
	}
	// From here on nothing more is sent to imageLoadGoroutine; we may still
	// fail below, but Close() no longer needs to abort the upload.
	d.committed = true
	logrus.Debugf("docker-daemon: Waiting for status")
	return <-d.statusChannel
}
| {
err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
defer func() {
logrus.Debugf("docker-daemon: sending done, status %v", err)
statusChannel <- err
}()
defer func() {
if err == nil {
reader.Close()
} else {
reader.CloseWithError(err)
}
}()
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
err = errors.Wrap(err, "Error saving image to docker engine")
return
}
defer resp.Body.Close()
} |
runTest.py | # Cast column to f64 before convert it to pandas
# This is a hack, use the assert_equal comparator when nulls is
# fully supported on cudf.sort_values
import json
import logging
import os
import re
import time
import blazingsql
from blazingsql import DataType
# import git
import numpy as np
import pandas as pd
from BlazingLogging import loggingHandler as lhandler
from Configuration import ExecutionMode
from Configuration import Settings as Settings
from DataBase import createSchema as cs
if ((Settings.execution_mode == ExecutionMode.FULL and
Settings.compare_res == "true") or
Settings.execution_mode == ExecutionMode.GENERATOR):
print(Settings.execution_mode)
print(Settings.compare_res)
from pydrill.client import PyDrill
from pyspark.sql.session import SparkSession
class Result:
    """Value object pairing a query's column metadata with two result sets.

    ``resultSet`` presumably holds the reference engine's rows and
    ``resultBlz`` BlazingSQL's rows — confirm against callers.
    """
    def __init__(self, columns, resultSet, resultBlz):
        self.columns = columns
        self.resultSet = resultSet
        self.resultBlz = resultBlz
# Logger name shared by logginghelper()/loggingClose() below.
name = "blzlogging"
# Single module-wide handler; save_log() later reads its accumulated .log records.
HANDLER = lhandler.logging_handler()
class loggerblz:
    """Value object holding one executed query, its error text and total time."""
    def __init__(self, query, error, totaltime):
        self.query = query
        self.error = error
        self.totaltime = totaltime
class result:
    """Value object holding one execution's result payload and error text.

    The constructor name was missing in the source (fill-in marker); it is
    restored here as ``__init__``, the only name that makes the two
    assignments below well-formed.
    """
    def __init__(self, res_execution, error):
        self.res_execution = res_execution
        self.error = error
def logginghelper(name):
    """Return a DEBUG-level logger wired exclusively to the shared HANDLER.

    Any handlers previously attached to the logger are discarded so that
    records accumulate only in ``HANDLER.log``.
    """
    # logging.basicConfig(filename='example.txt',level=logging.DEBUG)
    # NOTE(review): logging._defaultFormatter is a private attribute of the
    # stdlib logging module; this relies on an implementation detail.
    logging._defaultFormatter = logging.Formatter()
    logger = logging.getLogger(name)
    logger.handlers = []
    logger.setLevel(logging.DEBUG)
    logger.addHandler(HANDLER)
    return logger
def loggingClose(name):
    """Drop all records accumulated on the shared HANDLER.

    The ``name`` parameter is unused; kept for symmetry with logginghelper().
    """
    HANDLER.log = []
def upcast_to_float(df):
    """Cast boolean columns to float32 and integer columns to float64.

    Mutates ``df`` in place and also returns it; other dtypes are untouched.
    """
    for column in df.columns:
        dtype = df[column].dtype
        if np.issubdtype(dtype, np.bool_):
            df[column] = df[column].astype(np.float32)
        elif np.issubdtype(dtype, np.integer):
            df[column] = df[column].astype(np.float64)
    return df
def to_pandas_f64_engine(df, expected_types_list):
    """Coerce non-numeric, non-datetime columns of ``df`` toward the expected dtypes.

    Columns are matched positionally with ``expected_types_list``; only the
    first ``len(expected_types_list)`` columns are considered, an expected
    dtype of ``object`` leaves the column untouched, and empty frames are
    returned unchanged. Mutates ``df`` in place and returns it.
    """
    for column, expected in zip(df.columns, expected_types_list):
        if expected == np.dtype(object):
            continue
        if df.shape[0] == 0:
            continue
        current = df[column].dtype
        if np.issubdtype(current, np.number) or np.issubdtype(current, np.datetime64):
            continue
        if np.issubdtype(expected, np.bool_):
            # Reference engines emit booleans as "true"/"false" strings.
            df[column] = df[column].map({"true": 1.0, "false": 0.0}).astype(np.float32)
        elif np.issubdtype(expected, np.datetime64):
            df[column] = df[column].astype(expected)
        else:
            df[column] = pd.to_numeric(df[column], errors="coerce")
    return df
def get_null_constants(df):
    """Map each datetime/numeric column of ``df`` to its null sentinel.

    Datetime columns map to ``np.datetime64("nat")``, numeric columns to
    ``np.nan``; other columns are omitted from the returned dict.
    """
    nulls = {}
    for column, dtype in df.dtypes.to_dict().items():
        if np.issubdtype(dtype, np.datetime64):
            nulls[column] = np.datetime64("nat")
        elif np.issubdtype(dtype, np.number):
            nulls[column] = np.nan
    return nulls
def compare_results(pdf1, pdf2, acceptable_difference, use_percentage, engine):
    """Compare the BlazingSQL result against the reference-engine result.

    Parameters
    ----------
    pdf1, pdf2 : pandas.DataFrame
        BlazingSQL result and reference-engine result, respectively.
    acceptable_difference : float
        Tolerance for floating point columns.
    use_percentage : bool
        When True the tolerance is relative, otherwise absolute.
    engine : str or engine instance
        Reference engine; only used to name it in failure messages.

    Returns
    -------
    str
        "Success" or a "Fail: ..." description.
    """
    # np.warnings was an alias for the stdlib module, removed in NumPy >= 1.24;
    # use the stdlib module directly.
    import warnings
    warnings.filterwarnings("ignore")
    if pdf1.size == 0 and pdf2.size == 0:
        return "Success"
    # Name of the reference engine for failure messages. (This block was
    # previously duplicated verbatim; the first computation was discarded.)
    if not isinstance(engine, str):
        if isinstance(engine, PyDrill):
            msg = "PyDrill"
        else:
            msg = "PySpark"
    elif engine=="drill":
        msg = "PyDrill"
    else:
        msg = "PySpark"
    if pdf1.shape[0] == pdf2.shape[0]:
        if pdf1.shape[1] == pdf2.shape[1]:
            # np.object was removed in NumPy >= 1.24; the builtin `object`
            # is the same dtype.
            for name in pdf1.columns:
                if pdf1[name].dtype == object:
                    pdf1[name] = pdf1[name].astype('string')
            for name in pdf2.columns:
                if pdf2[name].dtype == object:
                    pdf2[name] = pdf2[name].astype('string')
            # Removing indexes, because those are considered when
            # comparing with equals()
            pdf1.reset_index(drop=True, inplace=True)
            pdf2.reset_index(drop=True, inplace=True)
            # Make the column labels equal as equals() also compare labels
            orig_pdf2_labels = pdf2.columns.to_list()
            pdf2.columns = pdf1.columns.to_list()
            exac_comp = pdf1.select_dtypes(exclude=np.inexact).equals(
                pdf2.select_dtypes(exclude=np.inexact)
            )
            # Restore labels
            pdf2.columns = orig_pdf2_labels
            tmp_pdf1 = pdf1.select_dtypes(include=np.inexact)
            tmp_pdf2 = pdf2.select_dtypes(include=np.inexact)
            if use_percentage:
                relative_tolerance = acceptable_difference
                absolute_tolerance = 0
            else:
                relative_tolerance = 0
                absolute_tolerance = acceptable_difference
            # np.allclose follows this formula:
            # absolute(a - b) <= (absolute_tolerance + relative_tolerance * absolute(b))
            res = np.all(exac_comp) and np.allclose(
                tmp_pdf1.values, tmp_pdf2.values, relative_tolerance,
                absolute_tolerance, equal_nan=True
            )
            if res:
                return "Success"
            else:
                return "Fail: Different values"
        else:
            return (
                "Fail: Different number of columns blzSQLresult: "
                + str(pdf1.shape[1])
                + " "
                + msg
                + " result: "
                + str(pdf2.shape[1])
            )
    else:
        return (
            "Fail: Different number of rows blzSQLresult: "
            + str(pdf1.shape[0])
            + " "
            + msg
            + " result: "
            + str(pdf2.shape[0])
        )
def begins_with(col1, col2, exp):
    """Return True when either column name starts with the prefix ``exp``."""
    return any(column.startswith(exp) for column in (col1, col2))
def compare_column_names(pdf1, pdf2):
    """Check that two result frames expose comparable column names.

    Auto-generated names (``EXPR...`` / ``count(...)``) are allowed to differ;
    two results that are both empty compare equal regardless of labels.
    Prints a diagnostic and returns False on mismatch, True otherwise.
    """
    if len(pdf1.columns) != len(pdf2.columns):
        if pdf1.values.size == 0 and pdf2.values.size == 0:
            return True
        print("Different set of columns")
        return False
    blz_names = pdf1.columns.values.tolist()
    ref_names = pdf2.columns.values.tolist()
    for blz_col, ref_col in zip(blz_names, ref_names):
        if blz_col == ref_col:
            continue
        generated = (
            begins_with(ref_col, blz_col, "EXPR")
            or begins_with(ref_col, blz_col, "count(")
        )
        if not generated:
            print("Different columns")
            return False
    return True
# NOTE kharoly percy william: NEVER CHANGE THE ORDER of these
# lines (the logger logic depends that we log first queryType and then queryId
# WARNING DO NOT CHANGE THE CALL ORDER IN THIS FUNCTION!
def get_Branch():
    """Return the blazingsql branch name the build was produced from."""
    branch = blazingsql.__branch_name__
    return branch
def get_CommitHash():
    """Return the blazingsql version string (used as the commit identifier)."""
    commit = blazingsql.__version__
    return commit
def get_QueryId(input_type, test_name, test_id):
    """Build the canonical query id: ``INPUTTYPE-TESTCODE-testid``."""
    return "{}-{}-{}".format(
        str(input_type).upper(),
        str(get_codTest(test_name)).upper(),
        str(test_id),
    )
def get_resultId(resultComparisson):
    """Map a comparison outcome string to 1 (pass) or 0 (fail)."""
    return 1 if resultComparisson == "Success" else 0
def get_codTest(test_name):
    """Translate a human-readable test group name into its short log code.

    Returns None for unknown test names.
    """
    codes = {
        "Aggregations without group by": "AGGWOGRBY",
        "Coalesce": "COALESCE",
        "Column Basis": "COLBAS",
        "Bindable Alias": "BALIAS",
        "Boolean": "BOOL",
        "Case": "CASE",
        "Cast": "CAST",
        "Common Table Expressions": "COMTABLEX",
        "Concat": "CONCAT",
        "Count Distinct": "COUNTD",
        "Count without group by": "COUNTWOGRBY",
        "Cross join": "CROSSJOIN",
        "Date": "DATE",
        "DayOfWeek": "DAYOFWEEK",
        "Dir": "DIR",
        "File System Google Storage": "FSGS",
        "Hdfs FileSystem": "FSHDFS",
        "Hive FileSystem": "FSHIVE",
        "File System Local": "FSLOCAL",
        "File System S3": "FSS3",
        "Full outer join": "FOUTJOIN",
        "Group by": "GROUPBY",
        "Group by without aggregations": "GRBYWOAGG",
        "Inner join": "INNERJOIN",
        "Left outer join": "LOUTJOIN",
        "Like": "LIKE",
        "Literal": "LITERAL",
        "Nested Queries": "NESTEDQ",
        "Non-EquiJoin Queries": "NEQUIJOIN",
        "Order by": "ORDERBY",
        "Predicates With Nulls": "PREDWNULLS",
        "Round": "ROUND",
        "Replace": "REPLACE",
        "Simple Distribution From Local": "SIMPLEDIST",
        "Smiles Test": "SMILES",
        "Substring": "SUBSTRING",
        "Tables from Pandas": "TBLPANDAS",
        "Timestampdiff": "TIMESTAMPD",
        "Timestamp": "TIMESTAMP",
        "To_timestamp": "TO_TIMESTAMP",
        "TPCH Queries": "TPCH",
        "Config Options": "TPCH",  # we want the same outputs as the tpch test
        "Unary ops": "UNARYOPS",
        "Unify Tables": "UNIFYTBL",
        "Union": "UNION",
        "Limit": "LIMIT",
        "Where clause": "WHERE",
        "Wild Card": "WILDCARD",
        "Simple String": "SSTRING",
        "String case": "STRINGCASE",
        "Message Validation": "MESSAGEVAL",
    }
    return codes.get(test_name)
def print_fixed_log(
    logger,
    test_name,
    input_type,
    test_id,
    sql,
    resultComparisson,
    error_message,
    load_time,
    engine_time,
    total_time,
):
    """Emit one 16-field log record describing a single test execution.

    WARNING: save_log() re-assembles records by counting 16 consecutive
    messages, so the number and order of logger.info() calls below must not
    change.
    """
    commitHash = get_CommitHash()
    branchName = get_Branch()
    # dateNow=datetime.now()
    inputType = cs.get_extension(input_type)
    logger.info(get_QueryId(inputType, test_name, test_id)) # QueryID
    logger.info(Settings.dateNow) # TimeStamp
    logger.info(test_name) # TestGroup
    logger.info(inputType) # InputType
    logger.info(sql) # Query
    logger.info(get_resultId(resultComparisson)) # Result
    logger.info(error_message) # Error
    logger.info(branchName) # PR
    logger.info(commitHash) # CommitHash
    logger.info(Settings.data["RunSettings"]["nRals"])
    logger.info(Settings.data["RunSettings"]["nGPUs"])
    logger.info(Settings.data["TestSettings"]["dataDirectory"])
    logger.info(test_id)
    logger.info(load_time)
    logger.info(engine_time)
    logger.info(total_time)
def print_query_results(
    sql,
    queryId,
    queryType,
    pdf1,
    pdf2,
    resultgdf,
    acceptable_difference,
    use_percentage,
    print_result,
    engine,
    input_type,
    load_time,
    engine_time,
    total_time,
):
    """Print a comparison report for one query and append a record to the run log.

    Compares the BlazingSQL result (pdf1) against the reference engine result
    (pdf2) unless RunSettings.compare_results disables it, prints the verdict,
    and forwards everything to print_fixed_log().
    """
    if print_result:
        print("#BLZ:")
        print(pdf1)
        if not isinstance(engine, str):
            if isinstance(engine, PyDrill):
                print("#DRILL:")
            else:
                print("#PYSPARK:")
            print(pdf2)
        else:
            # NOTE(review): in this branch (engine given as a string) pdf2 is
            # never printed, unlike the branch above — confirm intended.
            if engine=="drill":
                print("#DRILL:")
            else:
                print("#PYSPARK:")
    data_type = cs.get_extension(input_type)
    print(str(queryId) + " Test " + queryType + " - " + data_type)
    print("#QUERY:")
    print(sql)
    print("RESULT:")
    error_message = ""
    stringResult = ""
    compareResults = True
    if "compare_results" in Settings.data["RunSettings"]:
        compareResults = Settings.data["RunSettings"]["compare_results"]
    if compareResults:
        columnNamesComparison = compare_column_names(pdf1, pdf2)
        if columnNamesComparison is not True:
            print("Columns:")
            print(pdf1.columns)
            print(pdf2.columns)
            error_message = "Column names are not the same"
            print("ERROR:")
            print(error_message)
        resultComparisson = compare_results(
            pdf1, pdf2, acceptable_difference, use_percentage, engine
        )
        if resultComparisson != "Success":
            error_message = resultComparisson[6:]
            print("ERROR:")
            print(error_message)
        stringResult = resultComparisson
        if resultComparisson != "Success" or columnNamesComparison is False:
            stringResult = "Fail"
    else:
        stringResult = "Success"
    print(stringResult)
    print("TOTAL TIME: ")
    print(total_time)
    print("CRASHED NODES: ")
    # print(resultgdf.n_crashed_nodes)
    print("TOTAL NODES: ")
    # print(resultgdf.total_nodes)
    print("===================================================")
    logger = logginghelper(name)
    # TODO percy kharoly bindings we need to get the number from internal api
    # print_fixed_log(logger, queryType, queryId, sql, stringResult,
    #                 error_message, 1, 1, 2)
    print_fixed_log(
        logger,
        queryType,
        input_type,
        queryId,
        sql,
        stringResult,
        error_message,
        load_time,
        engine_time,
        total_time,
    )
def print_query_results2(sql, queryId, input_type, queryType, error_message, message_validation):
    """Print a report for a query validated by error-message matching.

    Used for tests that only check the produced error text against an expected
    message (see validate_messages()), then logs via print_fixed_log() with no
    timing information.
    """
    print(queryId)
    print("#QUERY:")
    print(sql)
    print("RESULT:")
    result = validate_messages(error_message, message_validation)
    print(result)
    print("ERROR:")
    if result=="Fail":
        print(error_message)
    else:
        error_message=""
    print("CALCITE TIME: ")
    print("-")
    print("RAL TIME: ")
    print("-")
    print("EXECUTION TIME: ")
    print("-")
    print("===================================================")
    logger = logginghelper(name)
    print_fixed_log(
        logger, queryType, input_type, queryId, sql, result, error_message, None, None, None
    )
def print_query_results_performance(sql, queryId, queryType, resultgdf):
    """Print timing information for a performance-mode run and log it.

    NOTE(review): print_fixed_log() takes 10 arguments (logger, test_name,
    input_type, test_id, sql, resultComparisson, error_message, load_time,
    engine_time, total_time); the call below passes only 9 — input_type is
    missing — so this would raise TypeError if executed. Confirm intended
    signature before relying on this path.
    """
    print(queryId)
    print("#QUERY:")
    print(sql)
    print("RESULT:")
    resultComparisson = "Success"
    print("CALCITE TIME: ")
    print(resultgdf.calciteTime)
    print("RAL TIME: ")
    print(resultgdf.ralTime)
    print("EXECUTION TIME: ")
    print(resultgdf.totalTime)
    print("===================================================")
    logger = logginghelper(name)
    print_fixed_log(
        logger,
        queryType,
        queryId,
        sql,
        resultComparisson,
        " ",
        resultgdf.calciteTime,
        resultgdf.ralTime,
        resultgdf.totalTime,
    )
def print_query_results_dist(
    sql,
    queryId,
    queryType,
    pdf1,
    pdf2,
    resultgdf,
    acceptable_difference,
    use_percentage,
    print_result,
):
    """Print a comparison report for a distributed run and log it.

    NOTE(review): compare_results() takes 5 arguments including ``engine`` and
    expects DataFrames, but the call below passes 4 arguments and ``.values``
    ndarrays — it would raise TypeError if executed. The print_fixed_log()
    call below likewise passes 9 of its 10 arguments (input_type missing).
    This function appears stale relative to its siblings; confirm before use.
    """
    if print_result:
        print("#BLZ:")
        print(pdf1)
        print("#DRILL:")
        print(pdf2)
    print(queryId)
    print("#QUERY:")
    print(sql)
    print("RESULT:")
    resultComparisson = compare_results(
        pdf1.values, pdf2.values, acceptable_difference, use_percentage
    )
    error_message = ""
    if resultComparisson != "Success":
        error_message = resultComparisson[6:]
        resultComparisson = "Fail"
        print(resultComparisson)
        print("ERROR:")
        print(error_message)
    else:
        print(resultComparisson)
    print("CALCITE TIME: ")
    print(resultgdf.calciteTime)
    print("RAL TIME: ")
    print(resultgdf.ralTime)
    print("EXECUTION TIME: ")
    print(resultgdf.totalTime)
    print("===================================================")
    logger = logginghelper(name)
    print_fixed_log(
        logger,
        queryType,
        queryId,
        sql,
        resultComparisson,
        error_message,
        None,
        None,
        None,
    )
class Test:
    """Per-test-group tally: total runs, successes, and the ids that failed."""
    def __init__(self, test_name):
        self.test_name = test_name
        self.total = 0
        self.success = 0
        self.fail_ids = []
def save_log(gpu_ci_mode=False):
    """Re-assemble HANDLER's log messages into a DataFrame, summarize and persist.

    Each test record consists of 16 consecutive log messages (the exact
    sequence produced by print_fixed_log()); messages are chunked accordingly,
    turned into a DataFrame, summarized, optionally saved to file/Google
    Sheets, and a (success, error_msgs) tuple is returned.
    """
    c = 1
    cadena = []
    subcadena = []
    countPass = 0
    countCrash = 0
    for x in HANDLER.log:
        # Collect 16 messages per record; the 17th message starts a new record.
        if c < 17:
            subcadena.append(x.msg)
            c = c + 1
        else:
            c = 1
            cadena.append(subcadena)
            subcadena = []
            subcadena.append(x.msg)
            c = c + 1
            print()
    cadena.append(subcadena)
    # If it didn't run any test (probably some were skipped)
    # then return success
    if cadena == [[]]:
        return True, []
    df = pd.DataFrame(
        cadena,
        columns=[
            "QueryID",
            "TimeStamp",
            "TestGroup",
            "InputType",
            "Query",
            "Result",
            "Error",
            "Branch",
            "CommitHash",
            "nRals",
            "nGPUs",
            "DataDirectory",
            "TestId",
            "LoadingTime",
            "EngineTotalTime",
            "TotalTime",
        ],
    )
    total = df.shape[0]
    countPass = df[df.Result == 1].count()["Result"]
    df1 = df[
        [
            "QueryID",
            "TimeStamp",
            "TestGroup",
            "InputType",
            "Query",
            "Result",
            "Error",
            "Branch",
            "CommitHash",
            "nRals",
            "nGPUs",
            "DataDirectory",
            "LoadingTime",
            "EngineTotalTime",
            "TotalTime",
        ]
    ].copy()
    create_summary_detail(df, gpu_ci_mode)
    printSummary(countPass, countCrash, total, gpu_ci_mode)
    if not gpu_ci_mode:
        saveLogInFile(df1)
        saveLog = False
        if "saveLog" in Settings.data["RunSettings"]:
            saveLog = Settings.data["RunSettings"]["saveLog"]
        print("saveLog = " + str(saveLog))
        # TODO william kharoly felipe we should try to enable and use
        # this function in the future
        # result, error_msgs = verify_prev_google_sheet_results(df1)
        result, error_msgs = True, []
        if result is True and saveLog == "true":
            saving_google_sheet_results(df1)
    else:
        # In gpuci mode the run fails whenever any test did not pass.
        if countPass < total:
            result, error_msgs = False, []
        else:
            result, error_msgs = True, []
    loggingClose(name)
    return result, error_msgs
def create_summary_detail(df, no_color):
    """Print a detailed per-group/per-input-type summary and the failed tests.

    ``no_color`` disables ANSI colors (e.g. for CI consoles). Mutates ``df``
    in place by replacing the numeric Result codes with "Success"/"Fail".
    """
    pdf = df
    pdf["Result"] = df["Result"].replace(1, "Success")
    pdf["Result"] = df["Result"].replace(0, "Fail")
    # making boolean series for a team name
    filter_fail = pdf["Result"] == "Fail"
    # filtering data
    pdf2 = pdf.where(filter_fail)
    pdf_fail = pdf2.dropna()
    if no_color:
        green = ""
        yellow = ""
        # red = ""
        endc = ""
    else:
        green = bcolors.OKGREEN
        yellow = bcolors.WARNING
        # red = bcolors.FAIL
        endc = bcolors.ENDC
    # display
    print(green + "========================================================")
    print("DETAILED SUMMARY TESTS")
    print("========================================================" + endc)
    # NOTE(review): "max_rows" is an abbreviated option key; newer pandas
    # requires the full "display.max_rows" — confirm the supported version.
    pd.set_option("max_rows", 1500)
    print(pdf.groupby(["TestGroup", "InputType"])["Result"].value_counts())
    print(yellow + "========================================================")
    print("FAILED TESTS" + yellow)
    print("========================================================" + endc)
    # pd.set_option('max_columns', 5)
    # pd.set_option('max_colwidth', 1000)
    pd.set_option("display.max_columns", None)
    pd.set_option("display.width", 2000)
    pd.set_option("display.float_format", "{:20,.2f}".format)
    pd.set_option("display.max_colwidth", None)
    print(
        pdf_fail.groupby(["TestGroup", "InputType", "Result"])["TestId"]
        .apply(",".join)
        .reset_index()
    )
# This function use the google spreadsheet to compare the current results
# against historic ones
# Returns a tuple with 2 entries:
# 1st element: False in case gpuci should be fail, True otherwise
# 2nd element: A list of error messages (in case 1st element is False)
# Example:
# result, error_msgs = verify_prev_google_sheet_results(log_pdf)
# if result == False:
# exits the python process and do not move to next steps
# TODO william kharoly felipe we should try to enable and use
# this function in the future
def _verify_prev_google_sheet_results(log_pdf):
    """Compare the current run's log against historic results in Google Sheets.

    Returns a tuple (ok, error_msgs): ok is False when the current run has
    fewer test groups/input types/tests than the previous run, or when a test
    that previously passed now fails. Requires the gspread credentials env
    (BLAZINGSQL_E2E_LOG_INFO); a local parquet cache can be used instead.
    """
    import gspread
    from oauth2client.service_account import ServiceAccountCredentials
    def get_the_data_from_sheet():
        # Use creds to create a client to interact with the Google Drive API
        scope = [
            "https://www.googleapis.com/auth/drive",
            "https://spreadsheets.google.com/feeds",
        ]
        # Using credentials from BlazingSQL
        # os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']
        # # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest
        # current_dir = "/home/ubuntu/.conda/envs/e2e"
        log_info = Settings.data["RunSettings"]["logInfo"]
        if log_info == "":
            print(
                """####### ======= >>>>>>> WARNING this test run will not
                be compared against old results from Google Docs. Define
                the env var BLAZINGSQL_E2E_LOG_INFO"""
            )
            return None
        log_info = json.loads(log_info)
        creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(
            log_info, scope
        )
        client_blazing = gspread.authorize(creds_blazing)
        # Find a Locally workbook by name and open a sheet
        work_sheet = "BSQL Log Results"
        if "worksheet" in Settings.data["RunSettings"]:
            work_sheet = Settings.data["RunSettings"]["worksheet"]
        sheet_blazing = client_blazing.open("BSQL End-to-End Tests").worksheet(
            work_sheet
        )
        # Writing log results into Blazing sheet
        ret = pd.DataFrame(sheet_blazing.get_all_records())
        # NOTE percy kharo william we need to patch these columns
        # before convert to parquet
        ret["LoadingTime"] = ret["LoadingTime"].astype(str)
        ret["EngineTotalTime"] = ret["EngineTotalTime"].astype(str)
        ret["TotalTime"] = ret["TotalTime"].astype(str)
        return ret
    dir_log = Settings.data["TestSettings"]["logDirectory"]
    gspreadCacheHint = Settings.data["RunSettings"]["gspreadCacheHint"]
    gspread_e2e_cache_path = dir_log + "/e2e-gspread-cache.parquet"
    gspread_df = None
    if gspreadCacheHint == "false":
        gspread_df = get_the_data_from_sheet()
        if gspread_df is not None:
            # Always save a cache (so when gspreadCacheHint
            # is false will refresh the cache)
            gspread_df.to_parquet(gspread_e2e_cache_path)
    elif gspreadCacheHint == "true":
        if os.path.isfile(gspread_e2e_cache_path):
            gspread_df = pd.read_parquet(gspread_e2e_cache_path)
        else:
            gspread_df = get_the_data_from_sheet()
            if gspread_df is not None:
                gspread_df.to_parquet(gspread_e2e_cache_path)
    if gspread_df is None:
        error_msg = """ERROR: This test run could not be compared
        against old results from Google Docs"""
        return False, [error_msg]
    log_pdf_copy = log_pdf.copy()
    prev_nrals = gspread_df["nRALS"][0]
    curr_nrals = Settings.data["RunSettings"]["nRals"]
    # Assume prev_nrals == curr_nrals
    last_e2e_run_id = gspread_df["Timestamp"][0]
    # NOTE If prev_nrals != curr_nrals we need to search the first
    # Timestamp (a.k.a ID) for the current nRals target
    if prev_nrals != curr_nrals:
        gspread_df_uniques = gspread_df.drop_duplicates()
        gspread_df_uniques_target_nrals = gspread_df_uniques.loc[
            gspread_df_uniques["nRALS"] == curr_nrals
        ]
        last_e2e_run_id = gspread_df_uniques_target_nrals.iloc[
            0, 1
        ]  # select the first Timestamp from the unique values
    print(
        "####### ======= >>>>>>> E2E INFO: We will compare the"
        + " current run against the ID (Timestamp): "
        + last_e2e_run_id
    )
    last_e2e_run_df = gspread_df.loc[gspread_df["Timestamp"] == last_e2e_run_id]
    # NOTE percy kharo william we need to rename some columns to use our dfs
    log_pdf_copy = log_pdf_copy.rename(
        columns={
            "TestGroup": "Test Group",
            "InputType": "Input Type",
            "nRals": "nRALS",
            "DataDirectory": "data_dir",
        }
    )
    # NOTE For debugging
    # log_pdf_copy['TimeStamp'] = log_pdf_copy['TimeStamp'].astype(str)
    # log_pdf_copy.to_parquet('/home/percy/workspace/logtest/ultimo.parquet',
    #                         compression='GZIP')
    # log_pdf_copy = pd.read_parquet('/home/user/last_run_log_df.parquet')
    error_msgs = []
    prev_summary = last_e2e_run_df.groupby("Test Group").count()
    curr_summary = log_pdf_copy.groupby("Test Group").count()
    prev_test_groups = prev_summary.index.tolist()
    curr_test_groups = curr_summary.index.tolist()
    has_less_test_groups = len(prev_test_groups) > len(curr_test_groups)
    # Check if someone deleted some tests
    # (there more test groups in the sheet)
    if has_less_test_groups:
        list_difference = [
            item for item in prev_test_groups if item not in curr_test_groups
        ]
        error_msg = (
            "ERROR: current e2e has less test groups than"
            + " previous run, delta is %s" % list_difference
        )
        error_msgs.append(error_msg)
    # Just check the common test groups
    if has_less_test_groups:
        test_groups = curr_test_groups
    else:
        test_groups = prev_test_groups
    for test_group in test_groups:
        prev_test_group_df = last_e2e_run_df.loc[
            last_e2e_run_df["Test Group"] == test_group
        ]
        prev_input_types = (
            prev_test_group_df.groupby("Input Type").count().index.tolist()
        )
        curr_test_group_df = log_pdf_copy.loc[log_pdf_copy["Test Group"] == test_group]
        cur_input_typ = curr_test_group_df.groupby("Input Type").count().index.tolist()
        has_less_input_types = len(prev_input_types) > len(cur_input_typ)
        if has_less_input_types is True:
            list_difference = [
                item for item in prev_input_types if item not in cur_input_typ
            ]
            error_msg = """ERROR: current test group %s has less
            input types cases, delta is %s""" % (
                test_group,
                list_difference,
            )
            error_msgs.append(error_msg)
        for input_type in prev_input_types:
            prev_tests_df = prev_test_group_df.loc[
                prev_test_group_df["Input Type"] == input_type
            ]
            prev_tests_df.sort_values(by=["QueryID"])
            curr_tests_df = curr_test_group_df.loc[
                curr_test_group_df["Input Type"] == input_type
            ]
            curr_tests_df.sort_values(by=["QueryID"])
            # We need to make a copy since we are going to drop some row
            prev_tests_df = prev_tests_df.copy()
            curr_tests_df = curr_tests_df.copy()
            # NOTE for debugging
            # print("============================================PREV!")
            # print(prev_tests_df.head())
            # print(len(prev_tests_df))
            # print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxCURR!")
            # print(curr_tests_df.head())
            # print(len(curr_tests_df))
            # Check if current run has less tests than previous run
            len_prev_tests_df = len(prev_tests_df)
            len_curr_tests_df = len(curr_tests_df)
            has_less_tests = len_prev_tests_df > len_curr_tests_df
            # NOTE for debugging
            # print("====== PREV TESTS ======")
            # print(prev_tests_df)
            # print("====== CURR TESTS ======")
            # print(curr_tests_df)
            if has_less_tests:
                prev_tests = prev_tests_df["QueryID"].tolist()
                curr_tests = curr_tests_df["QueryID"].tolist()
                list_difference = [
                    item for item in prev_tests if item not in curr_tests
                ]
                error_msg = """ERROR: The test group %s has less tests than
                previous run for input type %s, delta is %s""" % (
                    test_group,
                    input_type,
                    list_difference,
                )
                error_msgs.append(error_msg)
                n = len_prev_tests_df - len_curr_tests_df
                prev_tests_df.drop(prev_tests_df.tail(n).index, inplace=True)
            elif len_prev_tests_df < len_curr_tests_df:
                n = len_curr_tests_df - len_prev_tests_df
                curr_tests_df.drop(curr_tests_df.tail(n).index, inplace=True)
            prev_tests_results = prev_tests_df["Result"].to_list()
            curr_tests_results = curr_tests_df["Result"].to_list()
            for i in range(0, len(prev_tests_results)):
                prev_test_result = prev_tests_results[i]
                curr_test_result = curr_tests_results[i]
                if prev_test_result == 1 and curr_test_result == 0:
                    error_msg = """ERROR: Test %d for %s (%s) is now failing
                    but before was ok!""" % (
                        i + 1,
                        test_group,
                        input_type,
                    )
                    error_msgs.append(error_msg)
    succs = len(error_msgs) == 0
    return succs, error_msgs
def saving_google_sheet_results(log_pdf):
    """Append every row of the run log DataFrame to the results Google Sheet.

    Requires the gspread credentials JSON in RunSettings["logInfo"]; silently
    returns (with a warning) when it is not configured.
    """
    import gspread
    from oauth2client.service_account import ServiceAccountCredentials
    log_info = Settings.data["RunSettings"]["logInfo"]
    if log_info == "":
        print(
            """####### ======= >>>>>>> WARNING this test run will
            not save its results into the Google spreadsheet."""
        )
        return
    # Create an empty list
    log_list = []
    # Iterate over each row
    for index, rows in log_pdf.iterrows():
        # Create a list for the current row (ADDS)
        current_list = [
            rows.QueryID,
            str(rows.TimeStamp),
            str(rows.TestGroup),
            rows.InputType,
            rows.Query,
            rows.Result,
            rows.Error,
            rows.Branch,
            str(rows.CommitHash),
            rows.nRals,
            rows.nGPUs,
            rows.DataDirectory,
            rows.LoadingTime,
            rows.EngineTotalTime,
            rows.TotalTime,
        ]
        # append the list to the final list
        log_list.append(current_list)
    # Use creds to create a client to interact with the Google Drive API
    scope = [
        "https://www.googleapis.com/auth/drive",
        "https://spreadsheets.google.com/feeds",
    ]
    # === 1. BlazingSQL =====
    # Using credentials from BlazingSQL
    # os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']
    # # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest
    current_dir = "/home/ubuntu/.conda/envs/e2e"
    print(current_dir)
    log_info = json.loads(log_info)
    creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(log_info, scope)
    client_blazing = gspread.authorize(creds_blazing)
    # Find a Locally workbook by name and open a sheet
    work_sheet = "BSQL Log Results"
    if "worksheet" in Settings.data["RunSettings"]:
        work_sheet = Settings.data["RunSettings"]["worksheet"]
    blaz_googlesheat = client_blazing.open("BSQL End-to-End Tests")
    sheet_blazing = blaz_googlesheat.worksheet(work_sheet)
    # Writing log results into Blazing sheet
    total_queries = len(log_list)
    for i in range(0, total_queries):
        sheet_blazing.append_row(log_list[i])
        # one row per second — presumably to respect API rate limits; confirm
        time.sleep(1)
    print("\nTable was uptdated into Blazing Google SpreadSheet")
def saveLogInFile(df):
    """Persist the run log DataFrame as an Excel file in the configured log directory."""
    dir_log = Settings.data["TestSettings"]["logDirectory"]
    filepath = getFileName(dir_log)
    df.to_excel(filepath, index=False)
def validate_messages(error_message, message_validation):
error_message = error_message.replace('\n', ' ').replace('\r', ' ')
message_validation = message_validation.replace('\n', ' ').replace('\r', ' ')
error_message = error_message.replace(' ', '')
message_validation = message_validation.replace(' ', '')
if error_message == message_validation:
result = "Success"
else:
result = "Fail"
return result
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def on_jenkins():
# NOTE For more env vars see
# https://wiki.jenkins.io/display/JENKINS/Building+a+software+project
jenkins_job = os.environ.get("JOB_NAME")
if jenkins_job is not None:
return True
return False
def print_tests(tests, onlyFails=False):
print(
"""************************************************************
*******************"""
)
tab = " "
failedPrefix = ""
if onlyFails:
failedPrefix = "FAILED"
# TODO percy check None
for extension in tests:
if onlyFails:
if extension == "parquet":
print(
"!!!!!!!!!!!!!!!! "
+ failedPrefix
+ " "
+ extension
+ " TESTS !!!!!!!!!!!!"
)
else:
print(
"!!!!!!!!!!!!!!!! "
+ failedPrefix
+ " "
+ extension
+ " TESTS !!!!!!!!!!!!!!!!"
)
else:
if extension == "parquet":
print("################ " + extension + " TESTS ############")
else:
print("############## " + extension + " TESTS ##############")
testNames = tests.get(extension)
for testName in testNames:
test = testNames.get(testName)
total = test.get("total")
countPass = test.get("countPass")
countCrash = test.get("countCrash")
failIds = test.get("failIds")
showTest = False
if onlyFails:
if len(failIds) > 0:
showTest = True
print(tab + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
else:
showTest = True
print(tab + "++++++++++++++++++++++++++++++++")
if showTest:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
red = bcolors.FAIL
endc = bcolors.ENDC
# don't use colors since jenkins doesn't support ansi chars
if on_jenkins():
green = ""
yellow = ""
red = ""
endc = ""
print(
tab
+ "SUMMARY for "
+ failedPrefix
+ " test suite: "
+ testName
+ " - "
+ extension
)
if not onlyFails:
pass_green = green
pass_endc = endc
if (
countPass != total
): # if no full pass then don't use green colors here
pass_green = ""
pass_endc = ""
print(
pass_green
+ tab
+ "PASSED: "
+ str(countPass)
+ "/"
+ str(total)
+ pass_endc
)
fails = total - countPass - countCrash
yellow_fail = yellow
yellow_endc = endc
if fails == 0:
yellow_fail = ""
yellow_endc = ""
print(
yellow_fail
+ tab
+ "FAILED: "
+ str(fails)
+ "/"
+ str(total)
+ " "
+ str(failIds)
+ yellow_endc
)
red_crash = red
red_endc = endc
# if no crashes then don't use red colors here
if countCrash == 0:
red_crash = ""
red_endc = ""
print(
red_crash
+ tab
+ "CRASH: "
+ str(countCrash)
+ "/"
+ str(total)
+ red_endc
)
if not onlyFails:
print(tab + "TOTAL: " + str(total))
def printSummary(countPass, countCrash, total, no_color):
if no_color:
green = ""
yellow = ""
red = ""
endc = ""
else:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
red = bcolors.FAIL
endc = bcolors.ENDC
# Second: print the global summary (totals from all the tests)
fails = total - countPass - countCrash
print(
"""**********************************************************
*********************"""
)
print("TOTAL SUMMARY for test suite: ")
print(green + "PASSED: " + str(countPass) + "/" + str(total) + endc)
print(yellow + "FAILED: " + str(fails) + "/" + str(total) + endc)
print(red + "CRASH: " + str(countCrash) + "/" + str(total) + endc)
print("TOTAL: " + str(total))
def getFileName(dir_log):
fecha = time.strftime("%H%M%S")
hora = time.strftime("%I%M%S")
return dir_log + "LogTest" + fecha + hora + ".xlsx" #
# ===========================================================================
tableNames = [
"customer",
"orders",
"supplier",
"lineitem",
"part",
"partsupp",
"nation",
"region",
"perf",
"acq",
"names",
"bool_orders",
"web_site",
"web_sales",
"web_returns",
"web_page",
"web_clickstreams",
"warehouse",
"time_dim",
"store_sales",
"store_returns",
"store",
"ship_mode",
"reason",
"promotion",
"product_reviews",
"item_marketprices",
"item",
"inventory",
"income_band",
"household_demographics",
"date_dim",
"customer_demographics",
"customer_address",
"customer",
"split",
"docked",
"smiles",
"dcoids",
]
def get_table_occurrences(query):
res = []
for name in tableNames:
if query.find(name) != -1:
res.append(name)
return res
def replace_all(text, dic):
for i, j in dic.items():
text = re.sub(r"\s%s(\s|$|\,)" % i, j, text)
return text
def get_blazingsql_query(db_name, query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query,
{table_name: " %(table)s " % {"table": db_name + "." + table_name}},
)
return new_query
def get_drill_query(query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query, {table_name: " dfs.tmp.`%(table)s` " % {"table": table_name}}
)
return new_query
# ================================================================================================================
def run_query_drill(drill, query_str):
timeout = 400
query_result = drill.query(query_str, timeout)
df = query_result.to_dataframe()
if df.size == 0:
return Result(query_result.columns, df, None)
df = df[query_result.columns]
result = Result(query_result.columns, df, None)
return result
def run_query_spark(spark, query_str):
query_result = spark.sql(query_str)
df = query_result.toPandas()
if df.size == 0:
return Result(query_result.columns, df, None)
df = df[query_result.columns]
result = Result(query_result.columns, df, None)
return result
def save_results_arrow(filename, pdf2):
# save results
import pyarrow as pa
table = pa.Table.from_pandas(pdf2)
# schema = pa.Schema.from_pandas(pdf2)
with open(filename, "bw") as f:
writer = pa.RecordBatchFileWriter(f, table.schema)
writer.write(table)
writer.close()
def save_results_parquet(filename, pdf2):
pdf2.to_parquet(filename, compression="GZIP")
def run_query(
bc,
engine,
query,
queryId,
queryType,
worder,
orderBy,
acceptable_difference,
use_percentage,
input_type,
**kwargs
):
print(query)
query_spark = kwargs.get("query_spark", query)
algebra = kwargs.get("algebra", "")
nRals = Settings.data["RunSettings"]["nRals"]
print_result = kwargs.get("print_result")
if print_result is None:
print_result = False
message_validation = kwargs.get("message_validation", "")
if message_validation is None:
message_validation = False
data_type = cs.get_extension(input_type)
if Settings.execution_mode != "Generator":
print(
"\n=============== New query: "
+ str(queryId)
+ " - "
+ data_type
+ " ================="
)
load_time = 0
engine_time = 0
total_time = 0
nested_query = kwargs.get("nested_query", False)
error_message = ""
if not nested_query:
# if int(nRals) == 1: # Single Node
query_blz = query # get_blazingsql_query('main', query)
if algebra == "":
start_time = time.time()
try:
result_gdf = bc.sql(query_blz)
except Exception as e:
error_message=str(e)
if not message_validation:
end_time = time.time()
total_time = (end_time - start_time) * 1000
# SUM(CASE WHEN info = 'evaluate_split_query load_data' THEN
# duration ELSE 0 END) AS load_time,
# MAX(load_time) AS load_time,
# log_result = bc.log(
# """SELECT
# MAX(end_time) as end_time, query_id,
# MAX(total_time) AS total_time
# FROM (
# SELECT
# query_id, node_id,
# SUM(CASE WHEN info = 'Query Execution Done' THEN
# duration ELSE 0 END) AS total_time,
# MAX(log_time) AS end_time
# FROM
# bsql_logs
# WHERE
# info = 'evaluate_split_query load_data'
# OR info = 'Query Execution Done'
# GROUP BY
# node_id, query_id
# )
# GROUP BY
# query_id
# ORDER BY
# end_time DESC limit 1"""
# )
# if int(nRals) == 1: # Single Node
# n_log = log_result
# else: # Simple Distribution
# n_log = log_result.compute()
load_time = 0 # n_log['load_time'][0]
engine_time = 0 #n_log["total_time"][0]
else:
result_gdf = bc.sql(query_blz, algebra=algebra)
else: # for nested queries as column basis test
result_gdf = kwargs.get("blz_result", [])
str_code_test = str(get_codTest(queryType)).upper()
filename = str_code_test + "-" + str(queryId) + ".parquet"
result_dir = Settings.data["TestSettings"]["fileResultsDirectory"]
file_results_dir = str(result_dir)
if not message_validation== "":
print_query_results2(
query,
queryId,
input_type,
queryType,
error_message,
message_validation
)
elif not isinstance(engine, str):
if isinstance(engine, PyDrill):
# Drill
query_drill = get_drill_query(query)
result_drill_gd = run_query_drill(engine, query_drill)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = to_pandas_f64_engine(
result_drill_gd.resultSet, expected_dtypes
)
pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))
formatResults(pdf1, pdf2, worder, orderBy)
if Settings.execution_mode == ExecutionMode.GENERATOR:
file_res_drill_dir = (
file_results_dir + "/" + "drill" + "/" + filename
)
if not os.path.exists(file_res_drill_dir):
save_results_parquet(file_res_drill_dir, pdf2)
print("Drill: " + filename + " generated.")
else:
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
elif isinstance(engine, SparkSession):
# Spark
result_spark_df = run_query_spark(engine, query_spark)
if result_gdf is not None:
if result_gdf.columns is not None:
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = to_pandas_f64_engine(
result_spark_df.resultSet, expected_dtypes
)
pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))
formatResults(pdf1, pdf2, worder, orderBy)
if Settings.execution_mode == ExecutionMode.GENERATOR:
file_res_drill_dir = (
file_results_dir + "/" + "spark" + "/" + filename
)
if not os.path.exists(file_res_drill_dir):
save_results_parquet(file_res_drill_dir, pdf2)
print("Spark: " + filename + " generated.")
else:
print_query_results(
query_spark,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query_spark, queryId, queryType, result_gdf.error_message
)
else: # GPUCI
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if compareResults == "true":
resultFile = file_results_dir + "/" + str(engine) + "/" + filename
pdf2 = get_results(resultFile)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
format_pdf(pdf1, worder, orderBy)
print(pdf2)
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
else:
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = pd.DataFrame()
formatResults(pdf1, pdf2, worder, orderBy)
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
def run_query_log(
bc,
query,
queryId,
queryType,
**kwargs
):
result_gdf = None
error_message = ""
message_validation = ""
try:
result_gdf = bc.log(query)
except Exception as e:
error_message=str(e)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
print_query_results2(
query, queryId, DataType.CUDF, queryType, error_message, message_validation
)
else:
print_query_results2(
query, queryId, DataType.CUDF, queryType, error_message, message_validation
)
def run_query_performance(
bc,
drill,
query,
queryId,
queryType,
worder,
orderBy,
acceptable_difference,
use_percentage,
**kwargs
):
# Blazing
query_blz = query # get_blazingsql_query('main', query)
result_gdf = bc.sql(query_blz).get()
if result_gdf.error_message == "":
print_query_results_performance(query, queryId, queryType, result_gdf)
else:
print_query_results2(query, queryId, queryType, result_gdf.error_message)
def formatResults(pdf1, pdf2, worder, orderBy):
if worder == 1 and pdf1.size != 0 and pdf2.size != 0:
if len(pdf1.columns) == len(pdf2.columns):
pdf1.sort_values(
[orderBy] if orderBy else pdf1.columns.to_list(), inplace=True
)
pdf2.sort_values(
[orderBy] if orderBy else pdf2.columns.to_list(), inplace=True
)
def format_pdf(pdf, worder, orderBy):
if worder == 1 and pdf.size != 0:
pdf.sort_values([orderBy] if orderBy else pdf.columns.to_list(), inplace=True)
def get_results(result_file):
df = pd.read_parquet(result_file)
return df
| __init__ |
interface.go | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package watch
import (
"k8s.io/apimachinery/pkg/runtime"
)
// Interface can be implemented by anything that knows how to watch and report changes.
type Interface interface {
// Stops watching. Will close the channel returned by ResultChan(). Releases
// any resources used by the watch.
Stop() | // watch should be completely cleaned up.
ResultChan() <-chan Event
}
// EventType defines the possible types of events.
type EventType string
const (
// Event type:
// Added
// * a new Object has been added. If the Watcher does not have a specific
// ResourceVersion to watch from, existing entries will first be listed
// and propagated as "Added" events.
// Modified
// * an Object has been modified.
// Deleted
// * an Object has been deleted
// Error
// * an error has occurred. If the error is terminating, the results channel
// will be closed.
Added EventType = "ADDED"
Modified EventType = "MODIFIED"
Deleted EventType = "DELETED"
Error EventType = "ERROR"
DefaultChanSize int32 = 100
)
// Event represents a single event to a watched resource.
type Event struct {
Type EventType
// Previous is:
// * If Type is Added, Error or Synced: nil
// * If Type is Modified or Deleted: the previous state of the object
// Object is:
// * If Type is Added or Modified: the new state of the object.
// * If Type is Deleted, Error or Synced: nil
Previous runtime.Object
Object runtime.Object
// The error, if EventType is Error.
Error error
} |
// Returns a chan which will receive all the events. If an error occurs
// or Stop() is called, this channel will be closed, in which case the |
steps-test.js | import {
moduleFor,
test | // Specify the other units that are required for this test.
// needs: ['controller:foo']
});
// Replace this with your real tests.
test('it exists', function(assert) {
var controller = this.subject();
assert.ok(controller);
}); | } from 'ember-qunit';
moduleFor('controller:steps', { |
tx_test.go | // Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package bsvutil_test
import (
"bytes"
"io"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/whatsonchain/bsvd/chaincfg/chainhash"
"github.com/whatsonchain/bsvutil"
)
// TestTx tests the API for Tx.
func TestTx(t *testing.T) {
testTx := Block100000.Transactions[0]
tx := bsvutil.NewTx(testTx)
// Ensure we get the same data back out.
if msgTx := tx.MsgTx(); !reflect.DeepEqual(msgTx, testTx) {
t.Errorf("MsgTx: mismatched MsgTx - got %v, want %v",
spew.Sdump(msgTx), spew.Sdump(testTx))
}
// Ensure transaction index set and get work properly.
wantIndex := 0
tx.SetIndex(0)
if gotIndex := tx.Index(); gotIndex != wantIndex {
t.Errorf("Index: mismatched index - got %v, want %v",
gotIndex, wantIndex)
}
// Hash for block 100,000 transaction 0.
wantHashStr := "8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87" | }
// Request the hash multiple times to test generation and caching.
for i := 0; i < 2; i++ {
hash := tx.Hash()
if !hash.IsEqual(wantHash) {
t.Errorf("Hash #%d mismatched hash - got %v, want %v", i,
hash, wantHash)
}
}
}
// TestNewTxFromBytes tests creation of a Tx from serialized bytes.
func TestNewTxFromBytes(t *testing.T) {
// Serialize the test transaction.
testTx := Block100000.Transactions[0]
var testTxBuf bytes.Buffer
err := testTx.Serialize(&testTxBuf)
if err != nil {
t.Errorf("Serialize: %v", err)
}
testTxBytes := testTxBuf.Bytes()
// Create a new transaction from the serialized bytes.
tx, err := bsvutil.NewTxFromBytes(testTxBytes)
if err != nil {
t.Errorf("NewTxFromBytes: %v", err)
return
}
// Ensure the generated MsgTx is correct.
if msgTx := tx.MsgTx(); !reflect.DeepEqual(msgTx, testTx) {
t.Errorf("MsgTx: mismatched MsgTx - got %v, want %v",
spew.Sdump(msgTx), spew.Sdump(testTx))
}
}
// TestTxErrors tests the error paths for the Tx API.
func TestTxErrors(t *testing.T) {
// Serialize the test transaction.
testTx := Block100000.Transactions[0]
var testTxBuf bytes.Buffer
err := testTx.Serialize(&testTxBuf)
if err != nil {
t.Errorf("Serialize: %v", err)
}
testTxBytes := testTxBuf.Bytes()
// Truncate the transaction byte buffer to force errors.
shortBytes := testTxBytes[:4]
_, err = bsvutil.NewTxFromBytes(shortBytes)
if err != io.EOF {
t.Errorf("NewTxFromBytes: did not get expected error - "+
"got %v, want %v", err, io.EOF)
}
} | wantHash, err := chainhash.NewHashFromStr(wantHashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err) |
matrix.rs | use na::{Const, DimMin, Scalar};
use crate::aliases::{TMat, TVec};
use crate::traits::{Number, RealNumber};
/// The determinant of the matrix `m`.
pub fn determinant<T: RealNumber, const D: usize>(m: &TMat<T, D, D>) -> T
where
Const<D>: DimMin<Const<D>, Output = Const<D>>,
|
/// The inverse of the matrix `m`.
pub fn inverse<T: RealNumber, const D: usize>(m: &TMat<T, D, D>) -> TMat<T, D, D> {
m.clone()
.try_inverse()
.unwrap_or_else(TMat::<T, D, D>::zeros)
}
/// Component-wise multiplication of two matrices.
pub fn matrix_comp_mult<T: Number, const R: usize, const C: usize>(
x: &TMat<T, R, C>,
y: &TMat<T, R, C>,
) -> TMat<T, R, C> {
x.component_mul(y)
}
/// Treats the first parameter `c` as a column vector and the second parameter `r` as a row vector and does a linear algebraic matrix multiply `c * r`.
pub fn outer_product<T: Number, const R: usize, const C: usize>(
c: &TVec<T, R>,
r: &TVec<T, C>,
) -> TMat<T, R, C> {
c * r.transpose()
}
/// The transpose of the matrix `m`.
pub fn transpose<T: Scalar, const R: usize, const C: usize>(x: &TMat<T, R, C>) -> TMat<T, C, R> {
x.transpose()
}
| {
m.determinant()
} |
crud3.py | from dbconn import Session, Departments, Employees
# 1. 创建一个会话实例
session = Session()
#################################
# 查询数据库,返回实体类的实例
# qset1 = session.query(Departments)
# print(qset1) # 此时只是一条SQL语句,不真正连接数据库
# print(list(qset1)) # 取值的时候,才会连接数据库
# for dep in qset1:
# print('部门ID: %s, 部门名称: %s' % (dep.dep_id, dep.dep_name))
#################################
# 如果查询某些字段,返回的是元组
# qset2 = session.query(Employees.emp_name, Employees.email)
# print(qset2) # qset2是SQL语句
# print(list(qset2)) # 取值是元组
#################################
# 排序,可以对执行结果进一步操作
# qset3 = session.query(Departments).order_by(Departments.dep_id)
# for dep in qset3:
# print(dep.dep_id, dep.dep_name)
#################################
# 排序,取切片
# qset4 = session.query(Departments).order_by(Departments.dep_id)[2:4]
# print(qset4) # 因为qset4执行了切片取值,所以它不是sql语句了
# for dep in qset4: | # qset5 = session.query(Employees).filter(Employees.dep_id==2)
# for emp in qset5:
# print(emp.emp_name, emp.email)
#################################
# 过滤,查找2号部门使用163邮箱的员工
# qset6 = session.query(Employees).filter(Employees.dep_id==2)\
# .filter(Employees.email.like('%163.com'))
# for emp in qset6:
# print(emp.emp_name, emp.email)
#################################
# all方法返回列表, first方法返回结果的第一项
# qset7 = session.query(Departments).order_by(Departments.dep_id)
# print(qset7.all())
# print(qset7.first())
# dep = qset7.first()
# print(dep.dep_id, dep.dep_name)
#################################
# 多表查询,查询员工所在部门
# qset8 = session.query(Employees.emp_name, Departments.dep_name)\
# .join(Departments)
# for item in qset8:
# print(item)
# 多表查询时,query的第一个参数是Employees.emp_name,join时要写Departments
# 如果query的第一个参数是Departments.dep_name, join时要写Employees
# qset9 = session.query(Departments.dep_name, Employees.emp_name)\
# .join(Employees)
# for item in qset9:
# print(item)
#################################
# 更新,首先找到记录对应的实例,然后对实例重新赋值即可
# 注意,filter的结果是列表的形式
# qset10 = session.query(Departments).filter(Departments.dep_name=='人事部')
# hr = qset10[0] # 从列表中取出第一个元素
# hr.dep_name = '人力资源部'
# session.commit() # 增删改都要commit
#################################
# 删除,将7号部门删除
qset11 = session.query(Departments).filter(Departments.dep_id==7)
sales = qset11[0]
session.delete(sales)
session.commit() | # print(dep.dep_id, dep.dep_name)
#################################
# 过滤,查找2号部门的员工 |
serialization.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer serialization/deserialization functions.
"""
# pylint: disable=wildcard-import
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python import tf2
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.layers import advanced_activations
from tensorflow.python.keras.layers import convolutional
from tensorflow.python.keras.layers import convolutional_recurrent
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import cudnn_recurrent
from tensorflow.python.keras.layers import dense_attention
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.layers import local
from tensorflow.python.keras.layers import merge
from tensorflow.python.keras.layers import noise
from tensorflow.python.keras.layers import normalization
from tensorflow.python.keras.layers import normalization_v2
from tensorflow.python.keras.layers import pooling
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.keras.layers import rnn_cell_wrapper_v2
from tensorflow.python.keras.layers import wrappers
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.layers.preprocessing import normalization as preprocessing_normalization
from tensorflow.python.keras.layers.preprocessing import normalization_v1 as preprocessing_normalization_v1
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.util import tf_inspect as inspect
from tensorflow.python.util.tf_export import keras_export
ALL_MODULES = (
base_layer,
input_layer,
advanced_activations,
convolutional,
convolutional_recurrent,
core,
cudnn_recurrent,
dense_attention,
embeddings,
local,
merge,
noise,
normalization,
pooling,
image_preprocessing,
preprocessing_normalization_v1,
recurrent,
wrappers
)
ALL_V2_MODULES = (
rnn_cell_wrapper_v2,
normalization_v2,
recurrent_v2,
preprocessing_normalization
)
FEATURE_COLUMN_V1_OBJECTS = {}
FEATURE_COLUMN_V2_OBJECTS = {}
# ALL_OBJECTS is meant to be a global mutable. Hence we need to make it
# thread-local to avoid concurrent mutations.
LOCAL = threading.local()
def inject_feature_column_v1_objects(name, cls):
global FEATURE_COLUMN_V1_OBJECTS
FEATURE_COLUMN_V1_OBJECTS[name] = cls
def inject_feature_column_v2_objects(name, cls):
global FEATURE_COLUMN_V2_OBJECTS
FEATURE_COLUMN_V2_OBJECTS[name] = cls
def populate_deserializable_objects():
"""Populates dict ALL_OBJECTS with every built-in layer.
"""
global LOCAL
if not hasattr(LOCAL, 'ALL_OBJECTS'):
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = None
if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf2.enabled():
# Objects dict is already generated for the proper TF version:
# do nothing.
return
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = tf2.enabled() | ALL_MODULES,
obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))
# Overwrite certain V1 objects with V2 versions
if tf2.enabled():
generic_utils.populate_dict_with_module_objects(
LOCAL.ALL_OBJECTS,
ALL_V2_MODULES,
obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))
# These deserialization aliases are added for backward compatibility,
# as in TF 1.13, "BatchNormalizationV1" and "BatchNormalizationV2"
# were used as class name for v1 and v2 version of BatchNormalization,
# respectively. Here we explicitly convert them to their canonical names.
LOCAL.ALL_OBJECTS['BatchNormalizationV1'] = normalization.BatchNormalization
LOCAL.ALL_OBJECTS[
'BatchNormalizationV2'] = normalization_v2.BatchNormalization
# Prevent circular dependencies.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.premade.linear import LinearModel # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.premade.wide_deep import WideDeepModel # pylint: disable=g-import-not-at-top
LOCAL.ALL_OBJECTS['Input'] = input_layer.Input
LOCAL.ALL_OBJECTS['InputSpec'] = input_spec.InputSpec
LOCAL.ALL_OBJECTS['Network'] = models.Network
LOCAL.ALL_OBJECTS['Model'] = models.Model
LOCAL.ALL_OBJECTS['Sequential'] = models.Sequential
LOCAL.ALL_OBJECTS['LinearModel'] = LinearModel
LOCAL.ALL_OBJECTS['WideDeepModel'] = WideDeepModel
if tf2.enabled():
LOCAL.ALL_OBJECTS.update(FEATURE_COLUMN_V2_OBJECTS)
else:
LOCAL.ALL_OBJECTS.update(FEATURE_COLUMN_V1_OBJECTS)
# Merge layers, function versions.
LOCAL.ALL_OBJECTS['add'] = merge.add
LOCAL.ALL_OBJECTS['subtract'] = merge.subtract
LOCAL.ALL_OBJECTS['multiply'] = merge.multiply
LOCAL.ALL_OBJECTS['average'] = merge.average
LOCAL.ALL_OBJECTS['maximum'] = merge.maximum
LOCAL.ALL_OBJECTS['minimum'] = merge.minimum
LOCAL.ALL_OBJECTS['concatenate'] = merge.concatenate
LOCAL.ALL_OBJECTS['dot'] = merge.dot
@keras_export('keras.layers.serialize')
def serialize(layer):
return generic_utils.serialize_keras_object(layer)
@keras_export('keras.layers.deserialize')
def deserialize(config, custom_objects=None):
"""Instantiates a layer from a config dictionary.
Arguments:
config: dict of the form {'class_name': str, 'config': dict}
custom_objects: dict mapping class names (or function names)
of custom (non-Keras) objects to class/functions
Returns:
Layer instance (may be Model, Sequential, Network, Layer...)
"""
populate_deserializable_objects()
return generic_utils.deserialize_keras_object(
config,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name='layer') |
base_cls = base_layer.Layer
generic_utils.populate_dict_with_module_objects(
LOCAL.ALL_OBJECTS, |
wdpost_run.go | package storage
import (
"bytes"
"context"
"errors"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/crypto"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/types"
)
var errNoPartitions = errors.New("no partitions")
func (s *WindowPoStScheduler) failPost(deadline *miner.DeadlineInfo) {
log.Errorf("TODO")
/*s.failLk.Lock()
if eps > s.failed {
s.failed = eps
}
s.failLk.Unlock()*/
}
func (s *WindowPoStScheduler) doPost(ctx context.Context, deadline *miner.DeadlineInfo, ts *types.TipSet) {
ctx, abort := context.WithCancel(ctx)
s.abort = abort
s.activeDeadline = deadline
go func() {
defer abort()
ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.doPost")
defer span.End()
proof, err := s.runPost(ctx, *deadline, ts)
switch err {
case errNoPartitions:
return
case nil:
if err := s.submitPost(ctx, proof); err != nil {
log.Errorf("submitPost failed: %+v", err)
s.failPost(deadline)
return
}
default:
log.Errorf("runPost failed: %+v", err)
s.failPost(deadline)
return
}
}()
}
func (s *WindowPoStScheduler) checkFaults(ctx context.Context, ssi []abi.SectorNumber) ([]abi.SectorNumber, error) {
//faults := s.prover.Scrub(ssi)
log.Warnf("Stub checkFaults")
declaredFaults := map[abi.SectorNumber]struct{}{}
{
chainFaults, err := s.api.StateMinerFaults(ctx, s.actor, types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("checking on-chain faults: %w", err)
}
for _, fault := range chainFaults {
declaredFaults[fault] = struct{}{}
}
}
return nil, nil
}
func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo, ts *types.TipSet) (*miner.SubmitWindowedPoStParams, error) {
ctx, span := trace.StartSpan(ctx, "storage.runPost")
defer span.End()
buf := new(bytes.Buffer)
if err := s.actor.MarshalCBOR(buf); err != nil {
return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err)
}
rand, err := s.api.ChainGetRandomness(ctx, ts.Key(), crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes())
if err != nil {
return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
}
deadlines, err := s.api.StateMinerDeadlines(ctx, s.actor, ts.Key())
if err != nil {
return nil, err
}
firstPartition, _, err := miner.PartitionsForDeadline(deadlines, s.partitionSectors, di.Index)
if err != nil {
return nil, xerrors.Errorf("getting partitions for deadline: %w", err)
}
partitionCount, _, err := miner.DeadlineCount(deadlines, s.partitionSectors, di.Index)
if err != nil {
return nil, xerrors.Errorf("getting deadline partition count: %w", err)
}
dc, err := deadlines.Due[di.Index].Count()
if err != nil {
return nil, xerrors.Errorf("get deadline count: %w", err)
} | log.Infof("pc: %+v", partitionCount)
log.Infof("ts: %+v (%d)", ts.Key(), ts.Height())
if partitionCount == 0 {
return nil, errNoPartitions
}
partitions := make([]uint64, partitionCount)
for i := range partitions {
partitions[i] = firstPartition + uint64(i)
}
ssi, err := s.sortedSectorInfo(ctx, deadlines.Due[di.Index], ts)
if err != nil {
return nil, xerrors.Errorf("getting sorted sector info: %w", err)
}
if len(ssi) == 0 {
log.Warn("attempted to run windowPost without any sectors...")
return nil, xerrors.Errorf("no sectors to run windowPost on")
}
log.Infow("running windowPost",
"chain-random", rand,
"deadline", di,
"height", ts.Height())
var snums []abi.SectorNumber
for _, si := range ssi {
snums = append(snums, si.SectorNumber)
}
faults, err := s.checkFaults(ctx, snums)
if err != nil {
log.Errorf("Failed to declare faults: %+v", err)
}
tsStart := time.Now()
log.Infow("generating windowPost",
"sectors", len(ssi),
"faults", len(faults))
mid, err := address.IDFromAddress(s.actor)
if err != nil {
return nil, err
}
// TODO: Faults!
postOut, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), ssi, abi.PoStRandomness(rand))
if err != nil {
return nil, xerrors.Errorf("running post failed: %w", err)
}
if len(postOut) == 0 {
return nil, xerrors.Errorf("received proofs back from generate window post")
}
elapsed := time.Since(tsStart)
log.Infow("submitting window PoSt", "elapsed", elapsed)
return &miner.SubmitWindowedPoStParams{
Partitions: partitions,
Proofs: postOut,
Skipped: *abi.NewBitField(), // TODO: Faults here?
}, nil
}
// sortedSectorInfo fetches the on-chain sector set selected by the deadline
// bitfield at tipset ts and converts it into the []abi.SectorInfo slice the
// prover consumes.
func (s *WindowPoStScheduler) sortedSectorInfo(ctx context.Context, deadlineSectors *abi.BitField, ts *types.TipSet) ([]abi.SectorInfo, error) {
	sectors, err := s.api.StateMinerSectors(ctx, s.actor, deadlineSectors, false, ts.Key())
	if err != nil {
		return nil, err
	}

	out := make([]abi.SectorInfo, 0, len(sectors))
	for _, sector := range sectors {
		out = append(out, abi.SectorInfo{
			SectorNumber:    sector.ID,
			SealedCID:       sector.Info.Info.SealedCID,
			RegisteredProof: sector.Info.Info.RegisteredProof,
		})
	}
	return out, nil
}
func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) error {
ctx, span := trace.StartSpan(ctx, "storage.commitPost")
defer span.End()
enc, aerr := actors.SerializeParams(proof)
if aerr != nil {
return xerrors.Errorf("could not serialize submit post parameters: %w", aerr)
}
msg := &types.Message{
To: s.actor,
From: s.worker,
Method: builtin.MethodsMiner.SubmitWindowedPoSt,
Params: enc,
Value: types.NewInt(1000), // currently hard-coded late fee in actor, returned if not late
GasLimit: 10000000, // i dont know help
GasPrice: types.NewInt(1),
}
// TODO: consider maybe caring about the output
sm, err := s.api.MpoolPushMessage(ctx, msg)
if err != nil {
return xerrors.Errorf("pushing message to mpool: %w", err)
}
log.Infof("Submitted window post: %s", sm.Cid())
go func() {
rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid())
if err != nil {
log.Error(err)
return
}
if rec.Receipt.ExitCode == 0 {
return
}
log.Errorf("Submitting window post %s failed: exit %d", sm.Cid(), rec.Receipt.ExitCode)
}()
return nil
} |
log.Infof("di: %+v", di)
log.Infof("dc: %+v", dc)
log.Infof("fp: %+v", firstPartition) |
use std::collections::HashMap;
use std::cell::RefCell;
use std::default::Default;
use std::collections::BTreeMap;
use serde_json as json;
use std::io;
use std::fs;
use std::mem;
use std::thread::sleep;
use crate::client;
// ##############
// UTILITIES ###
// ############
/// Identifies an OAuth2 authorization scope.
/// A scope is needed when requesting an
/// [authorization token](https://developers.google.com/youtube/v3/guides/authentication).
#[derive(PartialEq, Eq, Hash)]
pub enum Scope {
    /// View and manage your data across Google Cloud Platform services
    CloudPlatform,
    /// View your data across Google Cloud Platform services
    CloudPlatformReadOnly,
    /// View and administer all your Firebase data and settings
    Firebase,
    /// View all your Firebase data and settings
    FirebaseReadonly,
}

// Maps each scope variant to its canonical Google OAuth2 scope URL.
impl AsRef<str> for Scope {
    fn as_ref(&self) -> &str {
        match *self {
            Scope::CloudPlatform => "https://www.googleapis.com/auth/cloud-platform",
            Scope::CloudPlatformReadOnly => "https://www.googleapis.com/auth/cloud-platform.read-only",
            Scope::Firebase => "https://www.googleapis.com/auth/firebase",
            Scope::FirebaseReadonly => "https://www.googleapis.com/auth/firebase.readonly",
        }
    }
}

// The read-only Firebase scope (least privilege) is the default.
impl Default for Scope {
    fn default() -> Scope {
        Scope::FirebaseReadonly
    }
}
// ########
// HUB ###
// ######
/// Central instance to access all FirebaseHosting related resource activities
///
/// # Examples
///
/// Instantiate a new hub
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::{Result, Error};
/// # async fn dox() {
/// use std::default::Default;
/// use oauth2;
/// use firebasehosting1_beta1::FirebaseHosting;
///
/// // Get an ApplicationSecret instance by some means. It contains the `client_id` and
/// // `client_secret`, among other things.
/// let secret: oauth2::ApplicationSecret = Default::default();
/// // Instantiate the authenticator. It will choose a suitable authentication flow for you,
/// // unless you replace `None` with the desired Flow.
/// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
/// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
/// // retrieve them from storage.
/// let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().versions_files_list("parent")
/// .status("takimata")
/// .page_token("amet.")
/// .page_size(-20)
/// .doit().await;
///
/// match result {
/// Err(e) => match e {
/// // The Error enum provides details about what exactly happened.
/// // You can also just use its `Debug`, `Display` or `Error` traits
/// Error::HttpError(_)
/// |Error::Io(_)
/// |Error::MissingAPIKey
/// |Error::MissingToken(_)
/// |Error::Cancelled
/// |Error::UploadSizeLimitExceeded(_, _)
/// |Error::Failure(_)
/// |Error::BadRequest(_)
/// |Error::FieldClash(_)
/// |Error::JsonDecodeError(_, _) => println!("{}", e),
/// },
/// Ok(res) => println!("Success: {:?}", res),
/// }
/// # }
/// ```
#[derive(Clone)]
pub struct FirebaseHosting<> {
    /// HTTP client used to execute API requests.
    client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>,
    /// OAuth2 authenticator supplying access tokens.
    auth: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>,
    /// User-agent header value; defaults to `google-api-rust-client/2.0.8` (see `user_agent()`).
    _user_agent: String,
    /// Base URL for endpoint calls; defaults to `https://firebasehosting.googleapis.com/` (see `base_url()`).
    _base_url: String,
    /// Root URL of the service; defaults to `https://firebasehosting.googleapis.com/` (see `root_url()`).
    _root_url: String,
}

// Marker trait identifying this type as the API hub.
impl<'a, > client::Hub for FirebaseHosting<> {}
impl<'a, > FirebaseHosting<> {

    /// Builds a hub from an HTTP client and an OAuth2 authenticator, using
    /// the library's default user-agent and endpoint URLs.
    pub fn new(client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>, auth: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>) -> FirebaseHosting<> {
        FirebaseHosting {
            client,
            auth,
            _user_agent: "google-api-rust-client/2.0.8".to_string(),
            _base_url: "https://firebasehosting.googleapis.com/".to_string(),
            _root_url: "https://firebasehosting.googleapis.com/".to_string(),
        }
    }

    /// Entry point for project-scoped resource calls.
    pub fn projects(&'a self) -> ProjectMethods<'a> {
        ProjectMethods { hub: self }
    }

    /// Entry point for site-scoped resource calls.
    pub fn sites(&'a self) -> SiteMethods<'a> {
        SiteMethods { hub: self }
    }

    /// Replaces the user-agent header value used in all requests and returns
    /// the previous one. The default is `google-api-rust-client/2.0.8`.
    pub fn user_agent(&mut self, agent: String) -> String {
        mem::replace(&mut self._user_agent, agent)
    }

    /// Replaces the base url used in all requests and returns the previous
    /// one. The default is `https://firebasehosting.googleapis.com/`.
    pub fn base_url(&mut self, url: String) -> String {
        mem::replace(&mut self._base_url, url)
    }

    /// Replaces the root url used in all requests and returns the previous
    /// one. The default is `https://firebasehosting.googleapis.com/`.
    pub fn root_url(&mut self, url: String) -> String {
        mem::replace(&mut self._root_url, url)
    }
}
// ############
// SCHEMAS ###
// ##########
/// Contains metadata about the user who performed an action, such as creating a release or finalizing a version.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ActingUser {
    /// The email address of the user when the user performed the action.
    pub email: Option<String>,
    /// A profile image URL for the user. May not be present if the user has changed their email address or deleted their account.
    #[serde(rename="imageUrl")]
    pub image_url: Option<String>,
}

// Marker trait: `ActingUser` appears only nested inside other schemas.
impl client::Part for ActingUser {}


/// Represents a DNS certificate challenge.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CertDnsChallenge {
    /// The domain name upon which the DNS challenge must be satisfied.
    #[serde(rename="domainName")]
    pub domain_name: Option<String>,
    /// The value that must be present as a TXT record on the domain name to satisfy the challenge.
    pub token: Option<String>,
}

// Marker trait: `CertDnsChallenge` appears only nested inside other schemas.
impl client::Part for CertDnsChallenge {}


/// Represents an HTTP certificate challenge.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CertHttpChallenge {
    /// The URL path on which to serve the specified token to satisfy the certificate challenge.
    pub path: Option<String>,
    /// The token to serve at the specified URL path to satisfy the certificate challenge.
    pub token: Option<String>,
}

// Marker trait: `CertHttpChallenge` appears only nested inside other schemas.
impl client::Part for CertHttpChallenge {}


/// A `Channel` represents a stream of releases for a site. All sites have a default `live` channel that serves content to the Firebase-provided subdomains and any connected custom domains.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites channels create projects](ProjectSiteChannelCreateCall) (request|response)
/// * [sites channels get projects](ProjectSiteChannelGetCall) (response)
/// * [sites channels patch projects](ProjectSiteChannelPatchCall) (request|response)
/// * [channels create sites](SiteChannelCreateCall) (request|response)
/// * [channels get sites](SiteChannelGetCall) (response)
/// * [channels patch sites](SiteChannelPatchCall) (request|response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Channel {
    /// Output only. The time at which the channel was created.
    #[serde(rename="createTime")]
    pub create_time: Option<String>,
    /// The time at which the channel will be automatically deleted. If null, the channel will not be automatically deleted. This field is present in the output whether it's set directly or via the `ttl` field.
    #[serde(rename="expireTime")]
    pub expire_time: Option<String>,
    /// Text labels used for extra metadata and/or filtering.
    pub labels: Option<HashMap<String, String>>,
    /// The fully-qualified resource name for the channel, in the format: sites/ SITE_ID/channels/CHANNEL_ID
    pub name: Option<String>,
    /// Output only. The current release for the channel, if any.
    pub release: Option<Release>,
    /// The number of previous releases to retain on the channel for rollback or other purposes. Must be a number between 1-100. Defaults to 10 for new channels.
    #[serde(rename="retainedReleaseCount")]
    pub retained_release_count: Option<i32>,
    /// Input only. A time-to-live for this channel. Sets `expire_time` to the provided duration past the time of the request.
    pub ttl: Option<String>,
    /// Output only. The time at which the channel was last updated.
    #[serde(rename="updateTime")]
    pub update_time: Option<String>,
    /// Output only. The URL at which the content of this channel's current release can be viewed. This URL is a Firebase-provided subdomain of `web.app`. The content of this channel's current release can also be viewed at the Firebase-provided subdomain of `firebaseapp.com`. If this channel is the `live` channel for the Hosting site, then the content of this channel's current release can also be viewed at any connected custom domains.
    pub url: Option<String>,
}

// Marker traits: `Channel` is used both as a request body and a response payload.
impl client::RequestValue for Channel {}
impl client::ResponseResult for Channel {}
/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites versions clone projects](ProjectSiteVersionCloneCall) (request)
/// * [versions clone sites](SiteVersionCloneCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CloneVersionRequest {
    /// If provided, only paths that do not match any of the RegEx values in this list will be included in the new version.
    pub exclude: Option<PathFilter>,
    /// If true, the call to `CloneVersion` immediately finalizes the version after cloning is complete. If false, the cloned version will have a status of `CREATED`. Use [`UpdateVersion`](patch) to set the status of the version to `FINALIZED`.
    pub finalize: Option<bool>,
    /// If provided, only paths that match one or more RegEx values in this list will be included in the new version.
    pub include: Option<PathFilter>,
    /// Required. The unique identifier for the version to be cloned, in the format: sites/SITE_ID/versions/VERSION_ID
    #[serde(rename="sourceVersion")]
    pub source_version: Option<String>,
}

// Marker trait: `CloneVersionRequest` is sent as a request body.
impl client::RequestValue for CloneVersionRequest {}


/// A configured rewrite that directs requests to a Cloud Run service. If the Cloud Run service does not exist when setting or updating your Firebase Hosting configuration, then the request fails. Any errors from the Cloud Run service are passed to the end user (for example, if you delete a service, any requests directed to that service receive a `404` error).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CloudRunRewrite {
    /// Optional. User-provided region where the Cloud Run service is hosted. Defaults to `us-central1` if not supplied.
    pub region: Option<String>,
    /// Required. User-defined ID of the Cloud Run service.
    #[serde(rename="serviceId")]
    pub service_id: Option<String>,
}

// Marker trait: `CloudRunRewrite` appears only nested inside other schemas.
impl client::Part for CloudRunRewrite {}


/// The intended behavior and status information of a domain.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites domains create projects](ProjectSiteDomainCreateCall) (request|response)
/// * [sites domains get projects](ProjectSiteDomainGetCall) (response)
/// * [sites domains update projects](ProjectSiteDomainUpdateCall) (request|response)
/// * [domains create sites](SiteDomainCreateCall) (request|response)
/// * [domains get sites](SiteDomainGetCall) (response)
/// * [domains update sites](SiteDomainUpdateCall) (request|response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Domain {
    /// Required. The domain name of the association.
    #[serde(rename="domainName")]
    pub domain_name: Option<String>,
    /// If set, the domain should redirect with the provided parameters.
    #[serde(rename="domainRedirect")]
    pub domain_redirect: Option<DomainRedirect>,
    /// Output only. Information about the provisioning of certificates and the health of the DNS resolution for the domain.
    pub provisioning: Option<DomainProvisioning>,
    /// Required. The site name of the association.
    pub site: Option<String>,
    /// Output only. Additional status of the domain association.
    pub status: Option<String>,
    /// Output only. The time at which the domain was last updated.
    #[serde(rename="updateTime")]
    pub update_time: Option<String>,
}

// Marker traits: `Domain` is used both as a request body and a response payload.
impl client::RequestValue for Domain {}
impl client::ResponseResult for Domain {}


/// The current certificate provisioning status information for a domain.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DomainProvisioning {
    /// The TXT records (for the certificate challenge) that were found at the last DNS fetch.
    #[serde(rename="certChallengeDiscoveredTxt")]
    pub cert_challenge_discovered_txt: Option<Vec<String>>,
    /// The DNS challenge for generating a certificate.
    #[serde(rename="certChallengeDns")]
    pub cert_challenge_dns: Option<CertDnsChallenge>,
    /// The HTTP challenge for generating a certificate.
    #[serde(rename="certChallengeHttp")]
    pub cert_challenge_http: Option<CertHttpChallenge>,
    /// The certificate provisioning status; updated when Firebase Hosting provisions an SSL certificate for the domain.
    #[serde(rename="certStatus")]
    pub cert_status: Option<String>,
    /// The IPs found at the last DNS fetch.
    #[serde(rename="discoveredIps")]
    pub discovered_ips: Option<Vec<String>>,
    /// The time at which the last DNS fetch occurred.
    #[serde(rename="dnsFetchTime")]
    pub dns_fetch_time: Option<String>,
    /// The DNS record match status as of the last DNS fetch.
    #[serde(rename="dnsStatus")]
    pub dns_status: Option<String>,
    /// The list of IPs to which the domain is expected to resolve.
    #[serde(rename="expectedIps")]
    pub expected_ips: Option<Vec<String>>,
}

// Marker trait: `DomainProvisioning` appears only nested inside other schemas.
impl client::Part for DomainProvisioning {}
/// Defines the behavior of a domain-level redirect. Domain redirects preserve the path of the redirect but replace the requested domain with the one specified in the redirect configuration.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct DomainRedirect {
    /// Required. The domain name to redirect to.
    #[serde(rename="domainName")]
    pub domain_name: Option<String>,
    /// Required. The redirect status code.
    #[serde(rename="type")]
    pub type_: Option<String>,
}

// Marker trait: `DomainRedirect` appears only nested inside other schemas.
impl client::Part for DomainRedirect {}


/// A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites channels delete projects](ProjectSiteChannelDeleteCall) (response)
/// * [sites domains delete projects](ProjectSiteDomainDeleteCall) (response)
/// * [sites versions delete projects](ProjectSiteVersionDeleteCall) (response)
/// * [sites delete projects](ProjectSiteDeleteCall) (response)
/// * [channels delete sites](SiteChannelDeleteCall) (response)
/// * [domains delete sites](SiteDomainDeleteCall) (response)
/// * [versions delete sites](SiteVersionDeleteCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Empty {
    // Private placeholder; never assigned anywhere in this file — presumably
    // present so the schema remains a proper struct. TODO(review): confirm.
    _never_set: Option<bool>
}

// Marker trait: `Empty` is returned as a response payload.
impl client::ResponseResult for Empty {}


/// A [`Header`](https://firebase.google.com/docs/hosting/full-config#headers) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to apply the specified custom response headers.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Header {
    /// The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
    pub glob: Option<String>,
    /// Required. The additional headers to add to the response.
    pub headers: Option<HashMap<String, String>>,
    /// The user-supplied RE2 regular expression to match against the request URL path.
    pub regex: Option<String>,
}

// Marker trait: `Header` appears only nested inside other schemas.
impl client::Part for Header {}


/// If provided, i18n rewrites are enabled.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct I18nConfig {
    /// Required. The user-supplied path where country and language specific content will be looked for within the public directory.
    pub root: Option<String>,
}

// Marker trait: `I18nConfig` appears only nested inside other schemas.
impl client::Part for I18nConfig {}
/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites channels list projects](ProjectSiteChannelListCall) (response)
/// * [channels list sites](SiteChannelListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListChannelsResponse {
    /// The list of channels.
    pub channels: Option<Vec<Channel>>,
    /// The pagination token, if more results exist beyond the ones in this response. Include this token in your next call to `ListChannels`. Page tokens are short-lived and should not be stored.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
}

// Marker trait: `ListChannelsResponse` is returned as a response payload.
impl client::ResponseResult for ListChannelsResponse {}


/// The response to listing Domains.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites domains list projects](ProjectSiteDomainListCall) (response)
/// * [domains list sites](SiteDomainListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListDomainsResponse {
    /// The list of domains, if any exist.
    pub domains: Option<Vec<Domain>>,
    /// The pagination token, if more results exist.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
}

// Marker trait: `ListDomainsResponse` is returned as a response payload.
impl client::ResponseResult for ListDomainsResponse {}


/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites channels releases list projects](ProjectSiteChannelReleaseListCall) (response)
/// * [sites releases list projects](ProjectSiteReleaseListCall) (response)
/// * [channels releases list sites](SiteChannelReleaseListCall) (response)
/// * [releases list sites](SiteReleaseListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListReleasesResponse {
    /// The pagination token, if more results exist beyond the ones in this response. Include this token in your next call to `ListReleases`. Page tokens are short-lived and should not be stored.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
    /// The list of releases, if any exist. (Previous doc described file hashes —
    /// a copy-paste from `PopulateVersionFilesResponse`; the field holds `Release` values.)
    pub releases: Option<Vec<Release>>,
}

// Marker trait: `ListReleasesResponse` is returned as a response payload.
impl client::ResponseResult for ListReleasesResponse {}


/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites list projects](ProjectSiteListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListSitesResponse {
    /// The pagination token, if more results exist beyond the ones in this response. Include this token in your next call to `ListSites`. Page tokens are short-lived and should not be stored.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
    /// A list of Site objects associated with the specified Firebase project.
    pub sites: Option<Vec<Site>>,
}

// Marker trait: `ListSitesResponse` is returned as a response payload.
impl client::ResponseResult for ListSitesResponse {}
/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites versions files list projects](ProjectSiteVersionFileListCall) (response)
/// * [versions files list sites](SiteVersionFileListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListVersionFilesResponse {
    /// The list of paths to the hashes of the files in the specified version.
    pub files: Option<Vec<VersionFile>>,
    /// The pagination token, if more results exist beyond the ones in this response. Include this token in your next call to `ListVersionFiles`. Page tokens are short-lived and should not be stored.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
}

// Marker trait: `ListVersionFilesResponse` is returned as a response payload.
impl client::ResponseResult for ListVersionFilesResponse {}


/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites versions list projects](ProjectSiteVersionListCall) (response)
/// * [versions list sites](SiteVersionListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListVersionsResponse {
    /// The pagination token, if more results exist beyond the ones in this response. Include this token in your next call to `ListVersions`. Page tokens are short-lived and should not be stored.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
    /// The list of versions, if any exist.
    pub versions: Option<Vec<Version>>,
}

// Marker trait: `ListVersionsResponse` is returned as a response payload.
impl client::ResponseResult for ListVersionsResponse {}


/// This resource represents a long-running operation that is the result of a network API call.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [operations get projects](ProjectOperationGetCall) (response)
/// * [sites versions clone projects](ProjectSiteVersionCloneCall) (response)
/// * [versions clone sites](SiteVersionCloneCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Operation {
    /// If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
    pub done: Option<bool>,
    /// The error result of the operation in case of failure or cancellation.
    pub error: Option<Status>,
    /// Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
    pub metadata: Option<HashMap<String, String>>,
    /// The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
    pub name: Option<String>,
    /// The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
    pub response: Option<HashMap<String, String>>,
}

// Marker trait: `Operation` is returned as a response payload.
impl client::ResponseResult for Operation {}


/// A representation of filter path.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PathFilter {
    /// An array of RegEx values by which to filter.
    pub regexes: Option<Vec<String>>,
}

// Marker trait: `PathFilter` appears only nested inside other schemas.
impl client::Part for PathFilter {}
/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites versions populate files projects](ProjectSiteVersionPopulateFileCall) (request)
/// * [versions populate files sites](SiteVersionPopulateFileCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PopulateVersionFilesRequest {
    /// A set of file paths to the hashes corresponding to assets that should be added to the version. A file path to an empty hash will remove the path from the version. Calculate a hash by Gzipping the file then taking the SHA256 hash of the newly compressed file.
    pub files: Option<HashMap<String, String>>,
}

// Marker trait: `PopulateVersionFilesRequest` is sent as a request body.
impl client::RequestValue for PopulateVersionFilesRequest {}


/// There is no detailed description.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites versions populate files projects](ProjectSiteVersionPopulateFileCall) (response)
/// * [versions populate files sites](SiteVersionPopulateFileCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PopulateVersionFilesResponse {
    /// The content hashes of the specified files that need to be uploaded to the specified URL.
    #[serde(rename="uploadRequiredHashes")]
    pub upload_required_hashes: Option<Vec<String>>,
    /// The URL to which the files should be uploaded, in the format: "https://upload-firebasehosting.googleapis.com/upload/sites/SITE_ID /versions/VERSION_ID/files" Perform a multipart `POST` of the Gzipped file contents to the URL using a forward slash and the hash of the file appended to the end.
    #[serde(rename="uploadUrl")]
    pub upload_url: Option<String>,
}

// Marker trait: `PopulateVersionFilesResponse` is returned as a response payload.
impl client::ResponseResult for PopulateVersionFilesResponse {}


/// Deprecated in favor of [site channels](sites.channels).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PreviewConfig {
    /// If true, preview URLs are enabled for this version.
    pub active: Option<bool>,
    /// Indicates the expiration time for previewing this version; preview URL requests received after this time will 404.
    #[serde(rename="expireTime")]
    pub expire_time: Option<String>,
}

// Marker trait: `PreviewConfig` appears only nested inside other schemas.
impl client::Part for PreviewConfig {}


/// A [`Redirect`](https://firebase.google.com/docs/hosting/full-config#redirects) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond with a redirect to the specified destination path.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Redirect {
    /// The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
    pub glob: Option<String>,
    /// Required. The value to put in the HTTP location header of the response. The location can contain capture group values from the pattern using a `:` prefix to identify the segment and an optional `*` to capture the rest of the URL. For example: "glob": "/:capture*", "statusCode": 301, "location": "https://example.com/foo/:capture"
    pub location: Option<String>,
    /// The user-supplied RE2 regular expression to match against the request URL path.
    pub regex: Option<String>,
    /// Required. The status HTTP code to return in the response. It must be a valid 3xx status code.
    #[serde(rename="statusCode")]
    pub status_code: Option<i32>,
}

// Marker trait: `Redirect` appears only nested inside other schemas.
impl client::Part for Redirect {}
/// A `Release` is a particular [collection of configurations and files](sites.versions) that is set to be public at a particular time.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites channels releases create projects](ProjectSiteChannelReleaseCreateCall) (request|response)
/// * [sites releases create projects](ProjectSiteReleaseCreateCall) (request|response)
/// * [channels releases create sites](SiteChannelReleaseCreateCall) (request|response)
/// * [releases create sites](SiteReleaseCreateCall) (request|response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Release {
/// The deploy description when the release was created. The value can be up to 512 characters.
pub message: Option<String>,
/// Output only. The unique identifier for the release, in either of the following formats: - sites/SITE_ID/releases/RELEASE_ID - sites/SITE_ID/channels/CHANNEL_ID/releases/RELEASE_ID This name is provided in the response body when you call [`releases.create`](sites.releases/create) or [`channels.releases.create`](sites.channels.releases/create).
pub name: Option<String>,
/// Output only. The time at which the version is set to be public.
#[serde(rename="releaseTime")]
pub release_time: Option<String>,
/// Output only. Identifies the user who created the release.
#[serde(rename="releaseUser")]
pub release_user: Option<ActingUser>,
/// Explains the reason for the release. Specify a value for this field only when creating a `SITE_DISABLE` type release.
#[serde(rename="type")]
pub type_: Option<String>,
/// Output only. The configuration and content that was released.
pub version: Option<Version>,
}
// Empty marker-trait impls: this type serves as both a request and a response body.
impl client::RequestValue for Release {}
impl client::ResponseResult for Release {}
/// A [`Rewrite`](https://firebase.google.com/docs/hosting/full-config#rewrites) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond as if the service were given the specified destination URL.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Rewrite {
/// The request will be forwarded to Firebase Dynamic Links.
#[serde(rename="dynamicLinks")]
pub dynamic_links: Option<bool>,
/// The function to proxy requests to. Must match the exported function name exactly.
pub function: Option<String>,
/// The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
pub glob: Option<String>,
/// The URL path to rewrite the request to.
pub path: Option<String>,
/// The user-supplied RE2 regular expression to match against the request URL path.
pub regex: Option<String>,
/// The request will be forwarded to Cloud Run.
pub run: Option<CloudRunRewrite>,
}
// Empty marker-trait impl: this type only appears nested inside other schemas.
impl client::Part for Rewrite {}
/// The configuration for how incoming requests to a site should be routed and processed before serving content. The URL request paths are matched against the specified URL patterns in the configuration, then Hosting applies the applicable configuration according to a specific [priority order](https://firebase.google.com/docs/hosting/full-config#hosting_priority_order).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ServingConfig {
/// How to handle well known App Association files.
#[serde(rename="appAssociation")]
pub app_association: Option<String>,
/// Defines whether to drop the file extension from uploaded files.
#[serde(rename="cleanUrls")]
pub clean_urls: Option<bool>,
/// An array of objects, where each object specifies a URL pattern that, if matched to the request URL path, triggers Hosting to apply the specified custom response headers.
pub headers: Option<Vec<Header>>,
/// Optional. Defines i18n rewrite behavior.
pub i18n: Option<I18nConfig>,
/// An array of objects (called redirect rules), where each rule specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond with a redirect to the specified destination path.
pub redirects: Option<Vec<Redirect>>,
/// An array of objects (called rewrite rules), where each rule specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond as if the service were given the specified destination URL.
pub rewrites: Option<Vec<Rewrite>>,
/// Defines how to handle a trailing slash in the URL path.
#[serde(rename="trailingSlashBehavior")]
pub trailing_slash_behavior: Option<String>,
}
// Empty marker-trait impl: this type only appears nested inside other schemas.
impl client::Part for ServingConfig {}
/// A `Site` represents a Firebase Hosting site.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites create projects](ProjectSiteCreateCall) (request|response)
/// * [sites get projects](ProjectSiteGetCall) (response)
/// * [sites patch projects](ProjectSitePatchCall) (request|response)
/// * [channels releases create sites](SiteChannelReleaseCreateCall) (none)
/// * [channels releases list sites](SiteChannelReleaseListCall) (none)
/// * [channels create sites](SiteChannelCreateCall) (none)
/// * [channels delete sites](SiteChannelDeleteCall) (none)
/// * [channels get sites](SiteChannelGetCall) (none)
/// * [channels list sites](SiteChannelListCall) (none)
/// * [channels patch sites](SiteChannelPatchCall) (none)
/// * [domains create sites](SiteDomainCreateCall) (none)
/// * [domains delete sites](SiteDomainDeleteCall) (none)
/// * [domains get sites](SiteDomainGetCall) (none)
/// * [domains list sites](SiteDomainListCall) (none)
/// * [domains update sites](SiteDomainUpdateCall) (none)
/// * [releases create sites](SiteReleaseCreateCall) (none)
/// * [releases list sites](SiteReleaseListCall) (none)
/// * [versions files list sites](SiteVersionFileListCall) (none)
/// * [versions clone sites](SiteVersionCloneCall) (none)
/// * [versions create sites](SiteVersionCreateCall) (none)
/// * [versions delete sites](SiteVersionDeleteCall) (none)
/// * [versions list sites](SiteVersionListCall) (none)
/// * [versions patch sites](SiteVersionPatchCall) (none)
/// * [versions populate files sites](SiteVersionPopulateFileCall) (none)
/// * [get config sites](SiteGetConfigCall) (none)
/// * [update config sites](SiteUpdateConfigCall) (none)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Site {
/// Optional. The [ID of a Web App](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects.webApps#WebApp.FIELDS.app_id) associated with the Hosting site.
#[serde(rename="appId")]
pub app_id: Option<String>,
/// Output only. The default URL for the Hosting site.
#[serde(rename="defaultUrl")]
pub default_url: Option<String>,
/// Optional. User-specified labels for the Hosting site.
pub labels: Option<HashMap<String, String>>,
/// Output only. The fully-qualified resource name of the Hosting site, in the format: projects/PROJECT_IDENTIFIER/sites/SITE_ID PROJECT_IDENTIFIER: the Firebase project's [`ProjectNumber`](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510).
pub name: Option<String>,
/// Output only. The type of Hosting site. Every Firebase project has a `DEFAULT_SITE`, which is created when Hosting is provisioned for the project. All additional sites are `USER_SITE`.
#[serde(rename="type")]
pub type_: Option<String>,
}
// Empty marker-trait impls: this type is a request body, an addressable resource,
// and a response body.
impl client::RequestValue for Site {}
impl client::Resource for Site {}
impl client::ResponseResult for Site {}
/// A `SiteConfig` contains metadata associated with a specific site that controls Firebase Hosting serving behavior
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites get config projects](ProjectSiteGetConfigCall) (response)
/// * [sites update config projects](ProjectSiteUpdateConfigCall) (request|response)
/// * [get config sites](SiteGetConfigCall) (response)
/// * [update config sites](SiteUpdateConfigCall) (request|response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct SiteConfig {
/// Whether or not web requests made by site visitors are logged via Cloud Logging.
#[serde(rename="cloudLoggingEnabled")]
pub cloud_logging_enabled: Option<bool>,
/// The number of FINALIZED versions that will be held for a site before automatic deletion. When a new version is deployed, content for versions in storage in excess of this number will be deleted, and will no longer be billed for storage usage. Oldest versions will be deleted first; sites are created with an unlimited number of max_versions by default.
#[serde(rename="maxVersions")]
// NOTE(review): string-typed despite its numeric meaning — presumably an int64
// carried as a JSON string by the API; confirm against the discovery document.
pub max_versions: Option<String>,
}
// Empty marker-trait impls: this type serves as both a request and a response body.
impl client::RequestValue for SiteConfig {}
impl client::ResponseResult for SiteConfig {}
/// The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Status {
/// The status code, which should be an enum value of google.rpc.Code.
pub code: Option<i32>,
/// A list of messages that carry the error details. There is a common set of message types for APIs to use.
pub details: Option<Vec<HashMap<String, String>>>,
/// A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
pub message: Option<String>,
}
// Empty marker-trait impl: this type only appears nested inside other schemas.
impl client::Part for Status {}
/// A `Version` is a configuration and a collection of static files which determine how a site is displayed.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [sites versions create projects](ProjectSiteVersionCreateCall) (request|response)
/// * [sites versions patch projects](ProjectSiteVersionPatchCall) (request|response)
/// * [versions create sites](SiteVersionCreateCall) (request|response)
/// * [versions patch sites](SiteVersionPatchCall) (request|response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Version {
/// The configuration for the behavior of the site. This configuration exists in the [`firebase.json`](https://firebase.google.com/docs/cli/#the_firebasejson_file) file.
pub config: Option<ServingConfig>,
/// Output only. The time at which the version was created.
#[serde(rename="createTime")]
pub create_time: Option<String>,
/// Output only. Identifies the user who created the version.
#[serde(rename="createUser")]
pub create_user: Option<ActingUser>,
/// Output only. The time at which the version was `DELETED`.
#[serde(rename="deleteTime")]
pub delete_time: Option<String>,
/// Output only. Identifies the user who `DELETED` the version.
#[serde(rename="deleteUser")]
pub delete_user: Option<ActingUser>,
/// Output only. The total number of files associated with the version. This value is calculated after a version is `FINALIZED`.
#[serde(rename="fileCount")]
pub file_count: Option<String>,
/// Output only. The time at which the version was `FINALIZED`.
#[serde(rename="finalizeTime")]
pub finalize_time: Option<String>,
/// Output only. Identifies the user who `FINALIZED` the version.
#[serde(rename="finalizeUser")]
pub finalize_user: Option<ActingUser>,
/// The labels used for extra metadata and/or filtering.
pub labels: Option<HashMap<String, String>>,
/// The fully-qualified resource name for the version, in the format: sites/ SITE_ID/versions/VERSION_ID This name is provided in the response body when you call [`CreateVersion`](sites.versions/create).
pub name: Option<String>,
/// Deprecated in favor of [site channels](sites.channels).
pub preview: Option<PreviewConfig>,
/// The deploy status of the version. For a successful deploy, call [`CreateVersion`](sites.versions/create) to make a new version (`CREATED` status), [upload all desired files](sites.versions/populateFiles) to the version, then [update](sites.versions/patch) the version to the `FINALIZED` status. Note that if you leave the version in the `CREATED` state for more than 12 hours, the system will automatically mark the version as `ABANDONED`. You can also change the status of a version to `DELETED` by calling [`DeleteVersion`](sites.versions/delete).
pub status: Option<String>,
/// Output only. The total stored bytesize of the version. This value is calculated after a version is `FINALIZED`.
#[serde(rename="versionBytes")]
pub version_bytes: Option<String>,
}
// Empty marker-trait impls: this type serves as both a request and a response body.
impl client::RequestValue for Version {}
impl client::ResponseResult for Version {}
/// A static content file that is part of a version.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct VersionFile {
/// The SHA256 content hash of the file.
pub hash: Option<String>,
/// The URI at which the file's content should display.
pub path: Option<String>,
/// Output only. The current status of a particular file in the specified version. The value will be either `pending upload` or `uploaded`.
pub status: Option<String>,
}
// Empty marker-trait impl: this type only appears nested inside other schemas.
impl client::Part for VersionFile {}
// ###################
// MethodBuilders ###
// #################
/// A builder providing access to all methods supported on *project* resources.
/// It is not used directly, but through the `FirebaseHosting` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
///
/// # async fn dox() {
/// use std::default::Default;
/// use oauth2;
/// use firebasehosting1_beta1::FirebaseHosting;
///
/// let secret: oauth2::ApplicationSecret = Default::default();
/// let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `operations_get(...)`, `sites_channels_create(...)`, `sites_channels_delete(...)`, `sites_channels_get(...)`, `sites_channels_list(...)`, `sites_channels_patch(...)`, `sites_channels_releases_create(...)`, `sites_channels_releases_list(...)`, `sites_create(...)`, `sites_delete(...)`, `sites_domains_create(...)`, `sites_domains_delete(...)`, `sites_domains_get(...)`, `sites_domains_list(...)`, `sites_domains_update(...)`, `sites_get(...)`, `sites_get_config(...)`, `sites_list(...)`, `sites_patch(...)`, `sites_releases_create(...)`, `sites_releases_list(...)`, `sites_update_config(...)`, `sites_versions_clone(...)`, `sites_versions_create(...)`, `sites_versions_delete(...)`, `sites_versions_files_list(...)`, `sites_versions_list(...)`, `sites_versions_patch(...)` and `sites_versions_populate_files(...)`
/// // to build up your call.
/// let rb = hub.projects();
/// # }
/// ```
// A builder giving access to all `projects.*` methods; created via
// `hub.projects()` and holding only a shared borrow of the hub that
// the individual *CallBuilders* dispatch their requests through.
// (The generator emitted a vacuous empty `where` clause here; removed.)
pub struct ProjectMethods<'a> {
    hub: &'a FirebaseHosting<>,
}

// Empty marker-trait impl identifying this type as a methods builder.
impl<'a> client::MethodsBuilder for ProjectMethods<'a> {}
impl<'a> ProjectMethods<'a> {
/// Create a builder to help you perform the following task:
///
/// Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
///
/// # Arguments
///
/// * `name` - The name of the operation resource.
pub fn operations_get(&self, name: &str) -> ProjectOperationGetCall<'a> {
    ProjectOperationGetCall {
        hub: self.hub,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Creates a new release, which makes the content of the specified version actively display on the appropriate URL(s).
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The site or channel to which the release belongs, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn sites_channels_releases_create(&self, request: Release, parent: &str) -> ProjectSiteChannelReleaseCreateCall<'a> {
    ProjectSiteChannelReleaseCreateCall {
        hub: self.hub,
        _request: request,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _version_name: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Lists the releases that have been created for the specified site or channel. When used to list releases for a site, this list includes releases for both the default `live` channel and any active preview channels for the specified site.
///
/// # Arguments
///
/// * `parent` - Required. The site or channel for which to list releases, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn sites_channels_releases_list(&self, parent: &str) -> ProjectSiteChannelReleaseListCall<'a> {
    ProjectSiteChannelReleaseListCall {
        hub: self.hub,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _page_token: Default::default(),
        _page_size: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Creates a new channel in the specified site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The site in which to create this channel, in the format: sites/ SITE_ID
pub fn sites_channels_create(&self, request: Channel, parent: &str) -> ProjectSiteChannelCreateCall<'a> {
    ProjectSiteChannelCreateCall {
        hub: self.hub,
        _request: request,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _channel_id: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Deletes the specified channel of the specified site. The `live` channel cannot be deleted.
///
/// # Arguments
///
/// * `name` - Required. The fully-qualified resource name for the channel, in the format: sites/SITE_ID/channels/CHANNEL_ID
pub fn sites_channels_delete(&self, name: &str) -> ProjectSiteChannelDeleteCall<'a> {
    ProjectSiteChannelDeleteCall {
        hub: self.hub,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Retrieves information for the specified channel of the specified site.
///
/// # Arguments
///
/// * `name` - Required. The fully-qualified resource name for the channel, in the format: sites/SITE_ID/channels/CHANNEL_ID
pub fn sites_channels_get(&self, name: &str) -> ProjectSiteChannelGetCall<'a> {
    ProjectSiteChannelGetCall {
        hub: self.hub,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Lists the channels for the specified site. All sites have a default `live` channel.
///
/// # Arguments
///
/// * `parent` - Required. The site for which to list channels, in the format: sites/SITE_ID
pub fn sites_channels_list(&self, parent: &str) -> ProjectSiteChannelListCall<'a> {
    ProjectSiteChannelListCall {
        hub: self.hub,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _page_token: Default::default(),
        _page_size: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Updates information for the specified channel of the specified site. Implicitly creates the channel if it doesn't already exist.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - The fully-qualified resource name for the channel, in the format: sites/ SITE_ID/channels/CHANNEL_ID
pub fn sites_channels_patch(&self, request: Channel, name: &str) -> ProjectSiteChannelPatchCall<'a> {
    ProjectSiteChannelPatchCall {
        hub: self.hub,
        _request: request,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _update_mask: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Creates a domain mapping on the specified site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The parent to create the domain association for, in the format: sites/site-name
pub fn sites_domains_create(&self, request: Domain, parent: &str) -> ProjectSiteDomainCreateCall<'a> {
    ProjectSiteDomainCreateCall {
        hub: self.hub,
        _request: request,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Deletes the existing domain mapping on the specified site.
///
/// # Arguments
///
/// * `name` - Required. The name of the domain association to delete.
pub fn sites_domains_delete(&self, name: &str) -> ProjectSiteDomainDeleteCall<'a> {
    ProjectSiteDomainDeleteCall {
        hub: self.hub,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Gets a domain mapping on the specified site.
///
/// # Arguments
///
/// * `name` - Required. The name of the domain configuration to get.
pub fn sites_domains_get(&self, name: &str) -> ProjectSiteDomainGetCall<'a> {
    ProjectSiteDomainGetCall {
        hub: self.hub,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Lists the domains for the specified site.
///
/// # Arguments
///
/// * `parent` - Required. The parent for which to list domains, in the format: sites/ site-name
pub fn sites_domains_list(&self, parent: &str) -> ProjectSiteDomainListCall<'a> {
    ProjectSiteDomainListCall {
        hub: self.hub,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _page_token: Default::default(),
        _page_size: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Updates the specified domain mapping, creating the mapping as if it does not exist.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - Required. The name of the domain association to update or create, if an association doesn't already exist.
pub fn sites_domains_update(&self, request: Domain, name: &str) -> ProjectSiteDomainUpdateCall<'a> {
    ProjectSiteDomainUpdateCall {
        hub: self.hub,
        _request: request,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Creates a new release, which makes the content of the specified version actively display on the appropriate URL(s).
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The site or channel to which the release belongs, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn sites_releases_create(&self, request: Release, parent: &str) -> ProjectSiteReleaseCreateCall<'a> {
    ProjectSiteReleaseCreateCall {
        hub: self.hub,
        _request: request,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _version_name: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Lists the releases that have been created for the specified site or channel. When used to list releases for a site, this list includes releases for both the default `live` channel and any active preview channels for the specified site.
///
/// # Arguments
///
/// * `parent` - Required. The site or channel for which to list releases, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn sites_releases_list(&self, parent: &str) -> ProjectSiteReleaseListCall<'a> {
    ProjectSiteReleaseListCall {
        hub: self.hub,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _page_token: Default::default(),
        _page_size: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Lists the remaining files to be uploaded for the specified version.
///
/// # Arguments
///
/// * `parent` - Required. The version for which to list files, in the format: sites/SITE_ID /versions/VERSION_ID
pub fn sites_versions_files_list(&self, parent: &str) -> ProjectSiteVersionFileListCall<'a> {
    ProjectSiteVersionFileListCall {
        hub: self.hub,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _status: Default::default(),
        _page_token: Default::default(),
        _page_size: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Creates a new version on the specified target site using the content of the specified version.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The target site for the cloned version, in the format: sites/ SITE_ID
pub fn sites_versions_clone(&self, request: CloneVersionRequest, parent: &str) -> ProjectSiteVersionCloneCall<'a> {
    ProjectSiteVersionCloneCall {
        hub: self.hub,
        _request: request,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Creates a new version for the specified site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The site in which to create the version, in the format: sites/ SITE_ID
pub fn sites_versions_create(&self, request: Version, parent: &str) -> ProjectSiteVersionCreateCall<'a> {
    ProjectSiteVersionCreateCall {
        hub: self.hub,
        _request: request,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _version_id: Default::default(),
        _size_bytes: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Deletes the specified version.
///
/// # Arguments
///
/// * `name` - Required. The fully-qualified resource name for the version, in the format: sites/SITE_ID/versions/VERSION_ID
pub fn sites_versions_delete(&self, name: &str) -> ProjectSiteVersionDeleteCall<'a> {
    ProjectSiteVersionDeleteCall {
        hub: self.hub,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Lists the versions that have been created for the specified site. This list includes versions for both the default `live` channel and any active preview channels for the specified site.
///
/// # Arguments
///
/// * `parent` - Required. The site or channel for which to list versions, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn sites_versions_list(&self, parent: &str) -> ProjectSiteVersionListCall<'a> {
    ProjectSiteVersionListCall {
        hub: self.hub,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _page_token: Default::default(),
        _page_size: Default::default(),
        _filter: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Updates the specified metadata for the specified version. This method will fail with `FAILED_PRECONDITION` in the event of an invalid state transition. The supported [state](../sites.versions#versionstatus) transitions for a version are from `CREATED` to `FINALIZED`. Use [`DeleteVersion`](delete) to set the status of a version to `DELETED`.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - The fully-qualified resource name for the version, in the format: sites/ SITE_ID/versions/VERSION_ID This name is provided in the response body when you call [`CreateVersion`](sites.versions/create).
pub fn sites_versions_patch(&self, request: Version, name: &str) -> ProjectSiteVersionPatchCall<'a> {
    ProjectSiteVersionPatchCall {
        hub: self.hub,
        _request: request,
        _name: name.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _update_mask: Default::default(),
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Adds content files to the specified version. Each file must be under 2 GB.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The version to which to add files, in the format: sites/SITE_ID /versions/VERSION_ID
pub fn sites_versions_populate_files(&self, request: PopulateVersionFilesRequest, parent: &str) -> ProjectSiteVersionPopulateFileCall<'a> {
    ProjectSiteVersionPopulateFileCall {
        hub: self.hub,
        _request: request,
        _parent: parent.to_owned(),
        // Optional knobs start out unset; configure them via the call builder.
        _delegate: Default::default(),
        _additional_params: Default::default(),
        _scopes: Default::default(),
    }
}
/// Create a builder to help you perform the following task:
///
/// Creates a new Hosting Site in the specified parent Firebase project. Note that Hosting sites can take several minutes to propagate through Firebase systems.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The Firebase project in which to create a Hosting site, in the format: projects/PROJECT_IDENTIFIER Refer to the `Site` [`name`](../projects#Site.FIELDS.name) field for details about PROJECT_IDENTIFIER values.
pub fn sites_create(&self, request: Site, parent: &str) -> ProjectSiteCreateCall<'a> {
// Required arguments are captured here; `_site_id` is an optional query parameter
// left unset until configured on the returned builder.
ProjectSiteCreateCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_site_id: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Deletes the specified Hosting Site from the specified parent Firebase project.
///
/// # Arguments
///
/// * `name` - Required. The fully-qualified resource name for the Hosting site, in the format: projects/PROJECT_IDENTIFIER/sites/SITE_ID Refer to the `Site` [`name`](../projects#Site.FIELDS.name) field for details about PROJECT_IDENTIFIER values.
pub fn sites_delete(&self, name: &str) -> ProjectSiteDeleteCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
ProjectSiteDeleteCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Gets the specified Hosting Site.
///
/// # Arguments
///
/// * `name` - Required. The fully-qualified resource name for the Hosting site, in the format: projects/PROJECT_IDENTIFIER/sites/SITE_ID Refer to the `Site` [`name`](../projects#Site.FIELDS.name) field for details about PROJECT_IDENTIFIER values. Since a SITE_ID is a globally unique identifier, you can also use the unique sub-collection resource access pattern, in the format: projects/-/sites/SITE_ID
pub fn sites_get(&self, name: &str) -> ProjectSiteGetCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
ProjectSiteGetCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Gets the Hosting metadata for a specific site.
///
/// # Arguments
///
/// * `name` - Required. The site for which to get the SiteConfig, in the format: sites/ site-name/config
pub fn sites_get_config(&self, name: &str) -> ProjectSiteGetConfigCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
ProjectSiteGetConfigCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists each Hosting Site associated with the specified parent Firebase project.
///
/// # Arguments
///
/// * `parent` - Required. The Firebase project for which to list sites, in the format: projects/PROJECT_IDENTIFIER Refer to the `Site` [`name`](../projects#Site.FIELDS.name) field for details about PROJECT_IDENTIFIER values.
pub fn sites_list(&self, parent: &str) -> ProjectSiteListCall<'a> {
// Required arguments are captured here; pagination parameters start unset and can
// be configured on the returned builder.
ProjectSiteListCall {
hub: self.hub,
_parent: parent.to_string(),
_page_token: Default::default(),
_page_size: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Updates attributes of the specified Hosting Site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - Output only. The fully-qualified resource name of the Hosting site, in the format: projects/PROJECT_IDENTIFIER/sites/SITE_ID PROJECT_IDENTIFIER: the Firebase project's [`ProjectNumber`](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510).
pub fn sites_patch(&self, request: Site, name: &str) -> ProjectSitePatchCall<'a> {
// Required arguments are captured here; `_update_mask` is an optional query
// parameter left unset until configured on the returned builder.
ProjectSitePatchCall {
hub: self.hub,
_request: request,
_name: name.to_string(),
_update_mask: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Sets the Hosting metadata for a specific site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - Required. The site for which to update the SiteConfig, in the format: sites/ site-name/config
pub fn sites_update_config(&self, request: SiteConfig, name: &str) -> ProjectSiteUpdateConfigCall<'a> {
// Required arguments are captured here; `_update_mask` is an optional query
// parameter left unset until configured on the returned builder.
ProjectSiteUpdateConfigCall {
hub: self.hub,
_request: request,
_name: name.to_string(),
_update_mask: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
}
/// A builder providing access to all methods supported on *site* resources.
/// It is not used directly, but through the `FirebaseHosting` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
///
/// # async fn dox() {
/// use std::default::Default;
/// use oauth2;
/// use firebasehosting1_beta1::FirebaseHosting;
///
/// let secret: oauth2::ApplicationSecret = Default::default();
/// let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `channels_create(...)`, `channels_delete(...)`, `channels_get(...)`, `channels_list(...)`, `channels_patch(...)`, `channels_releases_create(...)`, `channels_releases_list(...)`, `domains_create(...)`, `domains_delete(...)`, `domains_get(...)`, `domains_list(...)`, `domains_update(...)`, `get_config(...)`, `releases_create(...)`, `releases_list(...)`, `update_config(...)`, `versions_clone(...)`, `versions_create(...)`, `versions_delete(...)`, `versions_files_list(...)`, `versions_list(...)`, `versions_patch(...)` and `versions_populate_files(...)`
/// // to build up your call.
/// let rb = hub.sites();
/// # }
/// ```
pub struct SiteMethods<'a>
where {
// Shared hub handle; every call builder produced by the methods below borrows it.
hub: &'a FirebaseHosting<>,
}
impl<'a> client::MethodsBuilder for SiteMethods<'a> {}
impl<'a> SiteMethods<'a> {
/// Create a builder to help you perform the following task:
///
/// Creates a new release, which makes the content of the specified version actively display on the appropriate URL(s).
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The site or channel to which the release belongs, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn channels_releases_create(&self, request: Release, parent: &str) -> SiteChannelReleaseCreateCall<'a> {
// Required arguments are captured here; `_version_name` is an optional query
// parameter left unset until configured on the returned builder.
SiteChannelReleaseCreateCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_version_name: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists the releases that have been created for the specified site or channel. When used to list releases for a site, this list includes releases for both the default `live` channel and any active preview channels for the specified site.
///
/// # Arguments
///
/// * `parent` - Required. The site or channel for which to list releases, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn channels_releases_list(&self, parent: &str) -> SiteChannelReleaseListCall<'a> {
// Required arguments are captured here; pagination parameters start unset.
SiteChannelReleaseListCall {
hub: self.hub,
_parent: parent.to_string(),
_page_token: Default::default(),
_page_size: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Creates a new channel in the specified site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The site in which to create this channel, in the format: sites/ SITE_ID
pub fn channels_create(&self, request: Channel, parent: &str) -> SiteChannelCreateCall<'a> {
// Required arguments are captured here; `_channel_id` is an optional query
// parameter left unset until configured on the returned builder.
SiteChannelCreateCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_channel_id: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Deletes the specified channel of the specified site. The `live` channel cannot be deleted.
///
/// # Arguments
///
/// * `name` - Required. The fully-qualified resource name for the channel, in the format: sites/SITE_ID/channels/CHANNEL_ID
pub fn channels_delete(&self, name: &str) -> SiteChannelDeleteCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteChannelDeleteCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Retrieves information for the specified channel of the specified site.
///
/// # Arguments
///
/// * `name` - Required. The fully-qualified resource name for the channel, in the format: sites/SITE_ID/channels/CHANNEL_ID
pub fn channels_get(&self, name: &str) -> SiteChannelGetCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteChannelGetCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists the channels for the specified site. All sites have a default `live` channel.
///
/// # Arguments
///
/// * `parent` - Required. The site for which to list channels, in the format: sites/SITE_ID
pub fn channels_list(&self, parent: &str) -> SiteChannelListCall<'a> {
// Required arguments are captured here; pagination parameters start unset.
SiteChannelListCall {
hub: self.hub,
_parent: parent.to_string(),
_page_token: Default::default(),
_page_size: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Updates information for the specified channel of the specified site. Implicitly creates the channel if it doesn't already exist.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - The fully-qualified resource name for the channel, in the format: sites/ SITE_ID/channels/CHANNEL_ID
pub fn channels_patch(&self, request: Channel, name: &str) -> SiteChannelPatchCall<'a> {
// Required arguments are captured here; `_update_mask` is an optional query
// parameter left unset until configured on the returned builder.
SiteChannelPatchCall {
hub: self.hub,
_request: request,
_name: name.to_string(),
_update_mask: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Creates a domain mapping on the specified site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The parent to create the domain association for, in the format: sites/site-name
pub fn domains_create(&self, request: Domain, parent: &str) -> SiteDomainCreateCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteDomainCreateCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Deletes the existing domain mapping on the specified site.
///
/// # Arguments
///
/// * `name` - Required. The name of the domain association to delete.
pub fn domains_delete(&self, name: &str) -> SiteDomainDeleteCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteDomainDeleteCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Gets a domain mapping on the specified site.
///
/// # Arguments
///
/// * `name` - Required. The name of the domain configuration to get.
pub fn domains_get(&self, name: &str) -> SiteDomainGetCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteDomainGetCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists the domains for the specified site.
///
/// # Arguments
///
/// * `parent` - Required. The parent for which to list domains, in the format: sites/ site-name
pub fn domains_list(&self, parent: &str) -> SiteDomainListCall<'a> {
// Required arguments are captured here; pagination parameters start unset.
SiteDomainListCall {
hub: self.hub,
_parent: parent.to_string(),
_page_token: Default::default(),
_page_size: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Updates the specified domain mapping, creating the mapping as if it does not exist.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - Required. The name of the domain association to update or create, if an association doesn't already exist.
pub fn domains_update(&self, request: Domain, name: &str) -> SiteDomainUpdateCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteDomainUpdateCall {
hub: self.hub,
_request: request,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Creates a new release, which makes the content of the specified version actively display on the appropriate URL(s).
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The site or channel to which the release belongs, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn releases_create(&self, request: Release, parent: &str) -> SiteReleaseCreateCall<'a> {
// Required arguments are captured here; `_version_name` is an optional query
// parameter left unset until configured on the returned builder.
SiteReleaseCreateCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_version_name: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists the releases that have been created for the specified site or channel. When used to list releases for a site, this list includes releases for both the default `live` channel and any active preview channels for the specified site.
///
/// # Arguments
///
/// * `parent` - Required. The site or channel for which to list releases, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn releases_list(&self, parent: &str) -> SiteReleaseListCall<'a> {
// Required arguments are captured here; pagination parameters start unset.
SiteReleaseListCall {
hub: self.hub,
_parent: parent.to_string(),
_page_token: Default::default(),
_page_size: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists the remaining files to be uploaded for the specified version.
///
/// # Arguments
///
/// * `parent` - Required. The version for which to list files, in the format: sites/SITE_ID /versions/VERSION_ID
pub fn versions_files_list(&self, parent: &str) -> SiteVersionFileListCall<'a> {
// Required arguments are captured here; `_status` and the pagination parameters
// start unset and can be configured on the returned builder.
SiteVersionFileListCall {
hub: self.hub,
_parent: parent.to_string(),
_status: Default::default(),
_page_token: Default::default(),
_page_size: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Creates a new version on the specified target site using the content of the specified version.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The target site for the cloned version, in the format: sites/ SITE_ID
pub fn versions_clone(&self, request: CloneVersionRequest, parent: &str) -> SiteVersionCloneCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteVersionCloneCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Creates a new version for the specified site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The site in which to create the version, in the format: sites/ SITE_ID
pub fn | (&self, request: Version, parent: &str) -> SiteVersionCreateCall<'a> {
SiteVersionCreateCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_version_id: Default::default(),
_size_bytes: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Deletes the specified version.
///
/// # Arguments
///
/// * `name` - Required. The fully-qualified resource name for the version, in the format: sites/SITE_ID/versions/VERSION_ID
pub fn versions_delete(&self, name: &str) -> SiteVersionDeleteCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteVersionDeleteCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists the versions that have been created for the specified site. This list includes versions for both the default `live` channel and any active preview channels for the specified site.
///
/// # Arguments
///
/// * `parent` - Required. The site or channel for which to list versions, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
pub fn versions_list(&self, parent: &str) -> SiteVersionListCall<'a> {
// Required arguments are captured here; `_filter` and the pagination parameters
// start unset and can be configured on the returned builder.
SiteVersionListCall {
hub: self.hub,
_parent: parent.to_string(),
_page_token: Default::default(),
_page_size: Default::default(),
_filter: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Updates the specified metadata for the specified version. This method will fail with `FAILED_PRECONDITION` in the event of an invalid state transition. The supported [state](../sites.versions#versionstatus) transitions for a version are from `CREATED` to `FINALIZED`. Use [`DeleteVersion`](delete) to set the status of a version to `DELETED`.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - The fully-qualified resource name for the version, in the format: sites/ SITE_ID/versions/VERSION_ID This name is provided in the response body when you call [`CreateVersion`](sites.versions/create).
pub fn versions_patch(&self, request: Version, name: &str) -> SiteVersionPatchCall<'a> {
// Required arguments are captured here; `_update_mask` is an optional query
// parameter left unset until configured on the returned builder.
SiteVersionPatchCall {
hub: self.hub,
_request: request,
_name: name.to_string(),
_update_mask: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Adds content files to the specified version. Each file must be under 2 GB.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The version to which to add files, in the format: sites/SITE_ID /versions/VERSION_ID
pub fn versions_populate_files(&self, request: PopulateVersionFilesRequest, parent: &str) -> SiteVersionPopulateFileCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteVersionPopulateFileCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Gets the Hosting metadata for a specific site.
///
/// # Arguments
///
/// * `name` - Required. The site for which to get the SiteConfig, in the format: sites/ site-name/config
pub fn get_config(&self, name: &str) -> SiteGetConfigCall<'a> {
// Required arguments are captured here; the remaining fields are optional call state.
SiteGetConfigCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Sets the Hosting metadata for a specific site.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - Required. The site for which to update the SiteConfig, in the format: sites/ site-name/config
pub fn update_config(&self, request: SiteConfig, name: &str) -> SiteUpdateConfigCall<'a> {
// Required arguments are captured here; `_update_mask` is an optional query
// parameter left unset until configured on the returned builder.
SiteUpdateConfigCall {
hub: self.hub,
_request: request,
_name: name.to_string(),
_update_mask: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
}
// ###################
// CallBuilders ###
// #################
/// Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
///
/// A builder for the *operations.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().operations_get("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectOperationGetCall<'a>
where {
hub: &'a FirebaseHosting<>,
// Required `name` path parameter, spliced into `v1beta1/{+name}` by `doit()`.
_name: String,
// Optional caller-installed delegate consulted for progress, errors and retries.
_delegate: Option<&'a mut dyn client::Delegate>,
// Free-form extra query parameters; must not clash with the reserved names.
_additional_params: HashMap<String, String>,
// Selected OAuth scopes; `Scope::FirebaseReadonly` is used if left empty.
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectOperationGetCall<'a> {}
impl<'a> ProjectOperationGetCall<'a> {
/// Perform the operation you have build so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
// Fall back to a no-op delegate when the caller did not install one.
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "firebasehosting.projects.operations.get",
http_method: hyper::Method::GET });
// Collect query parameters, rejecting user-supplied ones that clash with reserved names.
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
params.push(("name", self._name.to_string()));
for &field in ["alt", "name"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
// Default to the read-only scope when the caller selected none.
if self._scopes.len() == 0 {
self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
}
// Expand the `{+name}` URI-template placeholder from the collected params.
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
// A `+` after `{` marks a reserved expansion, which is percent-encoded here.
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
// Path parameters were consumed by the template above; remove them from the query list.
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
// Request loop: each failure consults the delegate, which may ask for a delayed retry.
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
// Transport-level failure (connection, TLS, ...).
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
// Non-2xx: attempt to decode a structured server error for the delegate/caller.
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
// Success: decode the JSON body into the typed response and return it
// together with the raw response.
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The name of the operation resource.
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectOperationGetCall<'a> {
// Replaces the `name` captured when this call builder was created.
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectOperationGetCall<'a> {
// The delegate is borrowed mutably for the remaining lifetime of this builder.
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectOperationGetCall<'a>
where T: AsRef<str> {
// Later insertions with the same key overwrite earlier values.
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::FirebaseReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, no scope is added and the set is left unchanged; note that when the
/// set is still empty at execution time, `doit()` falls back to the default scope. For an
/// unauthenticated call, specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectOperationGetCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
// The `match` is used only for its side effect; both arms' values are discarded.
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Creates a new release, which makes the content of the specified version actively display on the appropriate URL(s).
///
/// A builder for the *sites.channels.releases.create* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Release;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Release::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_channels_releases_create(req, "parent")
///              .version_name("Lorem")
///              .doit().await;
/// # }
/// ```
pub struct ProjectSiteChannelReleaseCreateCall<'a>
    where {
    // Hub providing the shared HTTP client, authenticator, and base URL.
    hub: &'a FirebaseHosting<>,
    // JSON request body (`Release`) sent with the POST.
    _request: Release,
    // Required `parent` path parameter (site or channel resource name).
    _parent: String,
    // Optional `versionName` query parameter.
    _version_name: Option<String>,
    // Optional delegate consulted for progress, retries, and errors in `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters supplied via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to authorize with; a default is inserted in `doit()` if empty.
    _scopes: BTreeMap<String, ()>
}

// Marker trait impl: tags this type as an executable call builder.
impl<'a> client::CallBuilder for ProjectSiteChannelReleaseCreateCall<'a> {}
impl<'a> ProjectSiteChannelReleaseCreateCall<'a> {

    /// Perform the operation you have built so far.
    ///
    /// Serializes the `Release` request body to JSON, expands the URI template,
    /// acquires an OAuth token, and issues the POST, retrying when the delegate
    /// asks for it. Returns the raw response together with the decoded `Release`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Release)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op default delegate when the caller supplied none.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.channels.releases.create",
                                       http_method: hyper::Method::POST });
        // Collect path and query parameters, required ones first.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._version_name {
            params.push(("versionName", value.to_string()));
        }
        // Reject additional params that would clash with known parameter names.
        for &field in ["alt", "parent", "versionName"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/releases";
        // Apply the default scope when the caller did not choose one explicitly.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI-template placeholder; the '+' (reserved
        // expansion) variant is percent-encoded with the default encode set.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter is now part of the URL; drop it from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body once (stripping JSON nulls) into a seekable
        // cursor so it can be re-sent unchanged on every retry.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        // Measure the body by seeking to the end, then rewind for the first send.
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();

        // Retry loop: the delegate may request a delayed retry on token,
        // transport, or HTTP-status failures.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind so a retry re-sends the full payload.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                    .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                    .header(CONTENT_LENGTH, request_size as u64)
                    .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level failure: delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error and
                        // give the delegate a chance to retry before failing.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                                          json_server_error,
                                                                          server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Release) -> ProjectSiteChannelReleaseCreateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The site or channel to which the release belongs, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteChannelReleaseCreateCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The unique identifier for a version, in the format: sites/SITE_ID/versions/ VERSION_ID The SITE_ID in this version identifier must match the SITE_ID in the `parent` parameter. This query parameter must be empty if the `type` field in the request body is `SITE_DISABLE`.
    ///
    /// Sets the *version name* query property to the given value.
    pub fn version_name(mut self, new_value: &str) -> ProjectSiteChannelReleaseCreateCall<'a> {
        self._version_name = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteChannelReleaseCreateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteChannelReleaseCreateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteChannelReleaseCreateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Lists the releases that have been created for the specified site or channel. When used to list releases for a site, this list includes releases for both the default `live` channel and any active preview channels for the specified site.
///
/// A builder for the *sites.channels.releases.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_channels_releases_list("parent")
///              .page_token("eos")
///              .page_size(-4)
///              .doit().await;
/// # }
/// ```
pub struct ProjectSiteChannelReleaseListCall<'a>
    where {
    // Hub providing the shared HTTP client, authenticator, and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `parent` path parameter (site or channel resource name).
    _parent: String,
    // Optional `pageToken` query parameter for resuming a previous listing.
    _page_token: Option<String>,
    // Optional `pageSize` query parameter (maximum releases per page).
    _page_size: Option<i32>,
    // Optional delegate consulted for progress, retries, and errors in `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters supplied via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to authorize with; a default is inserted in `doit()` if empty.
    _scopes: BTreeMap<String, ()>
}

// Marker trait impl: tags this type as an executable call builder.
impl<'a> client::CallBuilder for ProjectSiteChannelReleaseListCall<'a> {}
impl<'a> ProjectSiteChannelReleaseListCall<'a> {

    /// Perform the operation you have built so far.
    ///
    /// Expands the URI template, acquires an OAuth token, and issues the GET
    /// (no request body), retrying when the delegate asks for it. Returns the
    /// raw response together with the decoded `ListReleasesResponse`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListReleasesResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op default delegate when the caller supplied none.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.channels.releases.list",
                                       http_method: hyper::Method::GET });
        // Collect path and query parameters, required ones first.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reject additional params that would clash with known parameter names.
        for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/releases";
        // Apply the default scope when the caller did not choose one explicitly.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI-template placeholder; the '+' (reserved
        // expansion) variant is percent-encoded with the default encode set.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter is now part of the URL; drop it from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Retry loop: the delegate may request a delayed retry on token,
        // transport, or HTTP-status failures.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                // GET request: no body is sent.
                let request = req_builder
                    .body(hyper::body::Body::empty());

                client.request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level failure: delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error and
                        // give the delegate a chance to retry before failing.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                                          json_server_error,
                                                                          server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// Required. The site or channel for which to list releases, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteChannelReleaseListCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// A token from a previous call to `releases.list` or `channels.releases.list` that tells the server where to resume listing.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectSiteChannelReleaseListCall<'a> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// The maximum number of releases to return. The service may return a lower number if fewer releases exist than this maximum number. If unspecified, defaults to 100.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectSiteChannelReleaseListCall<'a> {
        self._page_size = Some(new_value);
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteChannelReleaseListCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteChannelReleaseListCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteChannelReleaseListCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Creates a new channel in the specified site.
///
/// A builder for the *sites.channels.create* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Channel;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Channel::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_channels_create(req, "parent")
///              .channel_id("ipsum")
///              .doit().await;
/// # }
/// ```
pub struct ProjectSiteChannelCreateCall<'a>
    where {
    // Hub providing the shared HTTP client, authenticator, and base URL.
    hub: &'a FirebaseHosting<>,
    // JSON request body (`Channel`) sent with the POST.
    _request: Channel,
    // Required `parent` path parameter (site resource name).
    _parent: String,
    // Optional `channelId` query parameter (unique ID within the site).
    _channel_id: Option<String>,
    // Optional delegate consulted for progress, retries, and errors in `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters supplied via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to authorize with; a default is inserted in `doit()` if empty.
    _scopes: BTreeMap<String, ()>
}

// Marker trait impl: tags this type as an executable call builder.
impl<'a> client::CallBuilder for ProjectSiteChannelCreateCall<'a> {}
impl<'a> ProjectSiteChannelCreateCall<'a> {

    /// Perform the operation you have built so far.
    ///
    /// Serializes the `Channel` request body to JSON, expands the URI template,
    /// acquires an OAuth token, and issues the POST, retrying when the delegate
    /// asks for it. Returns the raw response together with the decoded `Channel`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Channel)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op default delegate when the caller supplied none.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.channels.create",
                                       http_method: hyper::Method::POST });
        // Collect path and query parameters, required ones first.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._channel_id {
            params.push(("channelId", value.to_string()));
        }
        // Reject additional params that would clash with known parameter names.
        for &field in ["alt", "parent", "channelId"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/channels";
        // Apply the default scope when the caller did not choose one explicitly.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI-template placeholder; the '+' (reserved
        // expansion) variant is percent-encoded with the default encode set.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter is now part of the URL; drop it from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body once (stripping JSON nulls) into a seekable
        // cursor so it can be re-sent unchanged on every retry.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        // Measure the body by seeking to the end, then rewind for the first send.
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();

        // Retry loop: the delegate may request a delayed retry on token,
        // transport, or HTTP-status failures.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind so a retry re-sends the full payload.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                    .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                    .header(CONTENT_LENGTH, request_size as u64)
                    .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level failure: delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error and
                        // give the delegate a chance to retry before failing.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                                          json_server_error,
                                                                          server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Channel) -> ProjectSiteChannelCreateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The site in which to create this channel, in the format: sites/ SITE_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteChannelCreateCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// Required. Immutable. A unique ID within the site that identifies the channel.
    ///
    /// Sets the *channel id* query property to the given value.
    pub fn channel_id(mut self, new_value: &str) -> ProjectSiteChannelCreateCall<'a> {
        self._channel_id = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteChannelCreateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteChannelCreateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteChannelCreateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Deletes the specified channel of the specified site. The `live` channel cannot be deleted.
///
/// A builder for the *sites.channels.delete* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_channels_delete("name")
///              .doit().await;
/// # }
/// ```
pub struct ProjectSiteChannelDeleteCall<'a>
    where {
    // Hub providing the shared HTTP client, authenticator, and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter (fully-qualified channel resource name).
    _name: String,
    // Optional delegate consulted for progress, retries, and errors in `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters supplied via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to authorize with; a default is inserted in `doit()` if empty.
    _scopes: BTreeMap<String, ()>
}

// Marker trait impl: tags this type as an executable call builder.
impl<'a> client::CallBuilder for ProjectSiteChannelDeleteCall<'a> {}
impl<'a> ProjectSiteChannelDeleteCall<'a> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Route progress/error callbacks to the caller's delegate, or a default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.channels.delete",
                               http_method: hyper::Method::DELETE });
        // Assemble known parameters; "name" is a path parameter consumed below.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject additional params that would clash with reserved/known keys.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Fall back to the method's default scope when the caller set none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template; `{+...}` (reserved expansion) values are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template; drop them from the query list.
        // NOTE(review): removal by ascending index is only safe while at most one
        // entry is removed per name, which holds here ("name" is pushed once).
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Request/retry loop: the delegate can ask for a retry after a delay.
        loop {
            // Obtain an OAuth2 token; on failure the delegate may supply one instead.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                let request = req_builder
                        .body(hyper::body::Body::empty());

                client.request(request.unwrap()).await
                
            };

            match req_result {
                Err(err) => {
                    // Transport-level failure: the delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: decode structured server errors (best effort) for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body and return it with the raw response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// Required. The fully-qualified resource name for the channel, in the format: sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteChannelDeleteCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteChannelDeleteCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteChannelDeleteCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteChannelDeleteCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Retrieves information for the specified channel of the specified site.
///
/// A builder for the *sites.channels.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_channels_get("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteChannelGetCall<'a> {
    /// The hub this call builder was created from.
    hub: &'a FirebaseHosting,
    /// Required `name` path parameter: sites/SITE_ID/channels/CHANNEL_ID.
    _name: String,
    /// Optional delegate consulted for progress, retries and token fallback in `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Additional free-form query parameters appended to the request URL.
    _additional_params: HashMap<String, String>,
    /// OAuth2 scopes to authorize with; used as a set (values are `()`).
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteChannelGetCall<'a> {}
impl<'a> ProjectSiteChannelGetCall<'a> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Channel)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Route progress/error callbacks to the caller's delegate, or a default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.channels.get",
                               http_method: hyper::Method::GET });
        // Assemble known parameters; "name" is a path parameter consumed below.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject additional params that would clash with reserved/known keys.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Fall back to the method's default scope when the caller set none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the URI template; `{+...}` (reserved expansion) values are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template; drop them from the query list.
        // NOTE(review): removal by ascending index is only safe while at most one
        // entry is removed per name, which holds here ("name" is pushed once).
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Request/retry loop: the delegate can ask for a retry after a delay.
        loop {
            // Obtain an OAuth2 token; on failure the delegate may supply one instead.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                let request = req_builder
                        .body(hyper::body::Body::empty());

                client.request(request.unwrap()).await
                
            };

            match req_result {
                Err(err) => {
                    // Transport-level failure: the delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: decode structured server errors (best effort) for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body and return it with the raw response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// Required. The fully-qualified resource name for the channel, in the format: sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteChannelGetCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteChannelGetCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteChannelGetCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteChannelGetCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Lists the channels for the specified site. All sites have a default `live` channel.
///
/// A builder for the *sites.channels.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_channels_list("parent")
/// .page_token("ipsum")
/// .page_size(-93)
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteChannelListCall<'a> {
    /// The hub this call builder was created from.
    hub: &'a FirebaseHosting,
    /// Required `parent` path parameter: sites/SITE_ID.
    _parent: String,
    /// Optional `pageToken` query parameter (resume token from a previous list call).
    _page_token: Option<String>,
    /// Optional `pageSize` query parameter (maximum number of channels to return).
    _page_size: Option<i32>,
    /// Optional delegate consulted for progress, retries and token fallback in `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Additional free-form query parameters appended to the request URL.
    _additional_params: HashMap<String, String>,
    /// OAuth2 scopes to authorize with; used as a set (values are `()`).
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteChannelListCall<'a> {}
impl<'a> ProjectSiteChannelListCall<'a> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListChannelsResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Route progress/error callbacks to the caller's delegate, or a default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.channels.list",
                               http_method: hyper::Method::GET });
        // Assemble known parameters; "parent" is a path parameter, the page
        // settings are optional query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reject additional params that would clash with reserved/known keys.
        for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/channels";
        // Fall back to the method's default scope when the caller set none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the URI template; `{+...}` (reserved expansion) values are percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template; drop them from the query list.
        // NOTE(review): removal by ascending index is only safe while at most one
        // entry is removed per name, which holds here ("parent" is pushed once).
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Request/retry loop: the delegate can ask for a retry after a delay.
        loop {
            // Obtain an OAuth2 token; on failure the delegate may supply one instead.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                let request = req_builder
                        .body(hyper::body::Body::empty());

                client.request(request.unwrap()).await
                
            };

            match req_result {
                Err(err) => {
                    // Transport-level failure: the delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: decode structured server errors (best effort) for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body and return it with the raw response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// Required. The site for which to list channels, in the format: sites/SITE_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteChannelListCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// A token from a previous call to `ListChannels` that tells the server where to resume listing.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectSiteChannelListCall<'a> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// The maximum number of channels to return. The service may return a lower number if fewer channels exist than this maximum number. If unspecified, defaults to 10. The maximum value is 100; values above 100 will be coerced to 100.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectSiteChannelListCall<'a> {
        self._page_size = Some(new_value);
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteChannelListCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteChannelListCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteChannelListCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Updates information for the specified channel of the specified site. Implicitly creates the channel if it doesn't already exist.
///
/// A builder for the *sites.channels.patch* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Channel;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Channel::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_channels_patch(req, "name")
/// .update_mask("gubergren")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteChannelPatchCall<'a> {
    /// The hub this call builder was created from.
    hub: &'a FirebaseHosting,
    /// The `Channel` payload sent as the JSON request body.
    _request: Channel,
    /// Required `name` path parameter: sites/SITE_ID/channels/CHANNEL_ID.
    _name: String,
    /// Optional `updateMask` query parameter (comma-separated field list).
    _update_mask: Option<String>,
    /// Optional delegate consulted for progress, retries and token fallback in `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Additional free-form query parameters appended to the request URL.
    _additional_params: HashMap<String, String>,
    /// OAuth2 scopes to authorize with; used as a set (values are `()`).
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteChannelPatchCall<'a> {}
impl<'a> ProjectSiteChannelPatchCall<'a> {
    /// Perform the operation you have built so far.
    /// Sends the PATCH request built so far and decodes the updated `Channel`
    /// from the JSON response.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Channel)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Route progress/error callbacks to the caller's delegate, or a default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.channels.patch",
                               http_method: hyper::Method::PATCH });
        // Assemble known parameters; "name" is a path parameter, "updateMask" optional.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        if let Some(value) = self._update_mask {
            params.push(("updateMask", value.to_string()));
        }
        // Reject additional params that would clash with reserved/known keys.
        for &field in ["alt", "name", "updateMask"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Fall back to the method's default scope when the caller set none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template; `{+...}` (reserved expansion) values are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template; drop them from the query list.
        // NOTE(review): removal by ascending index is only safe while at most one
        // entry is removed per name, which holds here ("name" is pushed once).
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body once up front (nulls stripped); the cursor
        // is rewound on each retry so the same bytes are re-sent.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();

        // Request/retry loop: the delegate can ask for a retry after a delay.
        loop {
            // Obtain an OAuth2 token; on failure the delegate may supply one instead.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.request(request.unwrap()).await
                
            };

            match req_result {
                Err(err) => {
                    // Transport-level failure: the delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: decode structured server errors (best effort) for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body and return it with the raw response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
///
/// Sets the *request* property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: Channel) -> ProjectSiteChannelPatchCall<'a> {
self._request = new_value;
self
}
/// The fully-qualified resource name for the channel, in the format: sites/ SITE_ID/channels/CHANNEL_ID
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectSiteChannelPatchCall<'a> {
self._name = new_value.to_string();
self
}
/// A comma-separated list of fields to be updated in this request.
///
/// Sets the *update mask* query property to the given value.
pub fn update_mask(mut self, new_value: &str) -> ProjectSiteChannelPatchCall<'a> {
self._update_mask = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteChannelPatchCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteChannelPatchCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteChannelPatchCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Creates a domain mapping on the specified site.
///
/// A builder for the *sites.domains.create* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Domain;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Domain::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_domains_create(req, "parent")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteDomainCreateCall<'a>
    where {
    /// The hub owning the HTTP client and authenticator used by `doit()`.
    hub: &'a FirebaseHosting<>,
    /// The `Domain` payload sent as the JSON request body.
    _request: Domain,
    /// Required `parent` path parameter (format: sites/SITE_NAME).
    _parent: String,
    /// Optional delegate consulted for progress and error handling during the call.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request a token for; `doit()` inserts `Scope::CloudPlatform` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker-trait impl: allows generic helpers to accept this call builder.
impl<'a> client::CallBuilder for ProjectSiteDomainCreateCall<'a> {}
impl<'a> ProjectSiteDomainCreateCall<'a> {
/// Perform the operation you have build so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Domain)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.domains.create",
                               http_method: hyper::Method::POST });
        // Assemble parameters: "parent" is a path parameter; "alt" is fixed to JSON.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        // Reject user-supplied extras that would clash with reserved parameter names.
        for &field in ["alt", "parent"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/domains";
        // Default to the cloud-platform scope when the caller added none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template: substitute {+parent}, percent-encoding reserved ("+") expansions.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters must not also appear in the query string; remove them.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body to JSON (nulls stripped) into a seekable buffer
        // so the same bytes can be re-sent on each retry attempt.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: a fresh token is fetched per attempt; the delegate decides
        // whether (and how long, via a blocking sleep) to wait before retrying.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind the body buffer before each attempt.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                // Transport-level failure: ask the delegate whether to retry.
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Non-2xx: try to decode a structured server error, offer a retry,
                    // otherwise surface it as BadRequest/Failure.
                    if !res.status().is_success() {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed `Domain` result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
///
/// Sets the *request* property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: Domain) -> ProjectSiteDomainCreateCall<'a> {
self._request = new_value;
self
}
/// Required. The parent to create the domain association for, in the format: sites/site-name
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> ProjectSiteDomainCreateCall<'a> {
self._parent = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteDomainCreateCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteDomainCreateCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteDomainCreateCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Deletes the existing domain mapping on the specified site.
///
/// A builder for the *sites.domains.delete* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_domains_delete("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteDomainDeleteCall<'a>
    where {
    /// The hub owning the HTTP client and authenticator used by `doit()`.
    hub: &'a FirebaseHosting<>,
    /// Required `name` path parameter identifying the domain association to delete.
    _name: String,
    /// Optional delegate consulted for progress and error handling during the call.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request a token for; `doit()` inserts `Scope::CloudPlatform` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker-trait impl: allows generic helpers to accept this call builder.
impl<'a> client::CallBuilder for ProjectSiteDomainDeleteCall<'a> {}
impl<'a> ProjectSiteDomainDeleteCall<'a> {
/// Perform the operation you have build so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.domains.delete",
                               http_method: hyper::Method::DELETE });
        // Assemble parameters: "name" is a path parameter; "alt" is fixed to JSON.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject user-supplied extras that would clash with reserved parameter names.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Default to the cloud-platform scope when the caller added none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template: substitute {+name}, percent-encoding reserved ("+") expansions.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters must not also appear in the query string; remove them.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request/retry loop: a fresh token is fetched per attempt; the delegate decides
        // whether (and how long, via a blocking sleep) to wait before retrying.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // DELETE carries no request body.
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                // Transport-level failure: ask the delegate whether to retry.
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Non-2xx: try to decode a structured server error, offer a retry,
                    // otherwise surface it as BadRequest/Failure.
                    if !res.status().is_success() {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed `Empty` result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
/// Required. The name of the domain association to delete.
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectSiteDomainDeleteCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteDomainDeleteCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteDomainDeleteCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteDomainDeleteCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Gets a domain mapping on the specified site.
///
/// A builder for the *sites.domains.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_domains_get("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteDomainGetCall<'a>
    where {
    /// The hub owning the HTTP client and authenticator used by `doit()`.
    hub: &'a FirebaseHosting<>,
    /// Required `name` path parameter identifying the domain configuration to fetch.
    _name: String,
    /// Optional delegate consulted for progress and error handling during the call.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request a token for; `doit()` inserts `Scope::FirebaseReadonly` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker-trait impl: allows generic helpers to accept this call builder.
impl<'a> client::CallBuilder for ProjectSiteDomainGetCall<'a> {}
impl<'a> ProjectSiteDomainGetCall<'a> {
/// Perform the operation you have build so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Domain)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.domains.get",
                               http_method: hyper::Method::GET });
        // Assemble parameters: "name" is a path parameter; "alt" is fixed to JSON.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject user-supplied extras that would clash with reserved parameter names.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Read-only method: default to the Firebase read-only scope when none was added.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the URI template: substitute {+name}, percent-encoding reserved ("+") expansions.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters must not also appear in the query string; remove them.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request/retry loop: a fresh token is fetched per attempt; the delegate decides
        // whether (and how long, via a blocking sleep) to wait before retrying.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // GET carries no request body.
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                // Transport-level failure: ask the delegate whether to retry.
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Non-2xx: try to decode a structured server error, offer a retry,
                    // otherwise surface it as BadRequest/Failure.
                    if !res.status().is_success() {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed `Domain` result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
/// Required. The name of the domain configuration to get.
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectSiteDomainGetCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteDomainGetCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteDomainGetCall<'a>
    where T: AsRef<str> {
    // Record an extra query parameter under its own key (later calls overwrite).
    let (key, val) = (name.as_ref().to_owned(), value.as_ref().to_owned());
    self._additional_params.insert(key, val);
    self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::FirebaseReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteDomainGetCall<'a>
    where T: Into<Option<S>>,
          S: AsRef<str> {
    // `Some(scope)` adds to the scope set; `None` is a no-op here — the set was
    // already emptied by not inserting anything, matching the original semantics.
    if let Some(scope) = scope.into() {
        self._scopes.insert(scope.as_ref().to_owned(), ());
    }
    self
}
}
/// Lists the domains for the specified site.
///
/// A builder for the *sites.domains.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_domains_list("parent")
/// .page_token("est")
/// .page_size(-62)
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteDomainListCall<'a>
    where {
    /// Shared hub holding the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    /// Required `parent` path parameter: the site whose domains are listed.
    _parent: String,
    /// Optional `pageToken` query parameter from a previous response.
    _page_token: Option<String>,
    /// Optional `pageSize` query parameter (server default is 50).
    _page_size: Option<i32>,
    /// Optional caller-supplied delegate consulted during `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters not covered by dedicated setters.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request; `doit()` defaults to `Scope::FirebaseReadonly` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker impl: identifies this builder as a callable API method builder.
impl<'a> client::CallBuilder for ProjectSiteDomainListCall<'a> {}
impl<'a> ProjectSiteDomainListCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Assembles path/query parameters, resolves an OAuth token for the
    /// configured scopes, then issues the GET request, retrying whenever the
    /// delegate asks for it. Returns the raw response plus the decoded body.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListDomainsResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.domains.list",
                                       http_method: hyper::Method::GET });
        // Collect parameters; capacity covers the known keys plus any extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reject additional params that would clash with reserved/known keys.
        for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/domains";
        // Default to the read-only scope when the caller added none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand URI-template placeholders; '+' templates are percent-encoded
        // with the default encode set before substitution.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template above; drop them from the
        // query string. NOTE(review): removing by ascending index would skew later
        // indices if more than one name were listed — safe here with a single entry.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request/retry loop: the delegate may supply a fallback token or ask to
        // retry after transport or HTTP failures.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                    .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure: retry only if the delegate says so.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try the known server-error shapes; the delegate may still retry.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The parent for which to list domains, in the format: sites/ site-name
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteDomainListCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The next_page_token from a previous request, if provided.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectSiteDomainListCall<'a> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// The page size to return. Defaults to 50.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectSiteDomainListCall<'a> {
        self._page_size = Some(new_value);
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteDomainListCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteDomainListCall<'a>
        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteDomainListCall<'a>
        where T: Into<Option<S>>,
              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Updates the specified domain mapping, creating the mapping as if it does not exist.
///
/// A builder for the *sites.domains.update* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Domain;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Domain::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_domains_update(req, "name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteDomainUpdateCall<'a>
    where {
    /// Shared hub holding the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    /// The `Domain` payload sent as the PUT request body.
    _request: Domain,
    /// Required `name` path parameter: the domain association to update or create.
    _name: String,
    /// Optional caller-supplied delegate consulted during `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters not covered by dedicated setters.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request; `doit()` defaults to `Scope::CloudPlatform` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker impl: identifies this builder as a callable API method builder.
impl<'a> client::CallBuilder for ProjectSiteDomainUpdateCall<'a> {}
impl<'a> ProjectSiteDomainUpdateCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Serializes the `Domain` request body to JSON, resolves an OAuth token
    /// for the configured scopes, and issues the PUT request, retrying whenever
    /// the delegate asks for it. Returns the raw response plus the decoded body.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Domain)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.domains.update",
                                       http_method: hyper::Method::PUT });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject additional params that would clash with reserved/known keys.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Default to the Cloud Platform scope when the caller added none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand URI-template placeholders; '+' templates are percent-encoded
        // with the default encode set before substitution.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template above; drop them from the
        // query string. NOTE(review): removing by ascending index would skew later
        // indices if more than one name were listed — safe here with a single entry.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once up front; nulls are stripped so unset
        // optional fields are omitted from the payload.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: the body reader is rewound before each attempt.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PUT).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                    .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                    .header(CONTENT_LENGTH, request_size as u64)
                    .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure: retry only if the delegate says so.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try the known server-error shapes; the delegate may still retry.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Domain) -> ProjectSiteDomainUpdateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The name of the domain association to update or create, if an association doesn't already exist.
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteDomainUpdateCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteDomainUpdateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteDomainUpdateCall<'a>
        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteDomainUpdateCall<'a>
        where T: Into<Option<S>>,
              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Creates a new release, which makes the content of the specified version actively display on the appropriate URL(s).
///
/// A builder for the *sites.releases.create* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Release;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Release::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_releases_create(req, "parent")
/// .version_name("Lorem")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteReleaseCreateCall<'a>
    where {
    /// Shared hub holding the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    /// The `Release` payload sent as the POST request body.
    _request: Release,
    /// Required `parent` path parameter: the site or channel receiving the release.
    _parent: String,
    /// Optional `versionName` query parameter identifying the version to release.
    _version_name: Option<String>,
    /// Optional caller-supplied delegate consulted during `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters not covered by dedicated setters.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request; `doit()` defaults to `Scope::CloudPlatform` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker impl: identifies this builder as a callable API method builder.
impl<'a> client::CallBuilder for ProjectSiteReleaseCreateCall<'a> {}
impl<'a> ProjectSiteReleaseCreateCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Serializes the `Release` request body to JSON, resolves an OAuth token
    /// for the configured scopes, and issues the POST request, retrying whenever
    /// the delegate asks for it. Returns the raw response plus the decoded body.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Release)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.releases.create",
                                       http_method: hyper::Method::POST });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._version_name {
            params.push(("versionName", value.to_string()));
        }
        // Reject additional params that would clash with reserved/known keys.
        for &field in ["alt", "parent", "versionName"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/releases";
        // Default to the Cloud Platform scope when the caller added none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand URI-template placeholders; '+' templates are percent-encoded
        // with the default encode set before substitution.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template above; drop them from the
        // query string. NOTE(review): removing by ascending index would skew later
        // indices if more than one name were listed — safe here with a single entry.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once up front; nulls are stripped so unset
        // optional fields are omitted from the payload.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: the body reader is rewound before each attempt.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                    .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                    .header(CONTENT_LENGTH, request_size as u64)
                    .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure: retry only if the delegate says so.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try the known server-error shapes; the delegate may still retry.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Release) -> ProjectSiteReleaseCreateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The site or channel to which the release belongs, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteReleaseCreateCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The unique identifier for a version, in the format: sites/SITE_ID/versions/ VERSION_ID The SITE_ID in this version identifier must match the SITE_ID in the `parent` parameter. This query parameter must be empty if the `type` field in the request body is `SITE_DISABLE`.
    ///
    /// Sets the *version name* query property to the given value.
    pub fn version_name(mut self, new_value: &str) -> ProjectSiteReleaseCreateCall<'a> {
        self._version_name = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteReleaseCreateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteReleaseCreateCall<'a>
        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteReleaseCreateCall<'a>
        where T: Into<Option<S>>,
              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Lists the releases that have been created for the specified site or channel. When used to list releases for a site, this list includes releases for both the default `live` channel and any active preview channels for the specified site.
///
/// A builder for the *sites.releases.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_releases_list("parent")
/// .page_token("labore")
/// .page_size(-43)
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteReleaseListCall<'a>
    where {
    // Reference to the API hub holding the shared HTTP client and authenticator.
    hub: &'a FirebaseHosting<>,
    // Required `parent` path parameter: the site or channel whose releases are listed.
    _parent: String,
    // Optional `pageToken` query parameter for resuming a previous listing.
    _page_token: Option<String>,
    // Optional `pageSize` query parameter limiting the number of results.
    _page_size: Option<i32>,
    // Optional delegate consulted for progress reporting and retry decisions.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters; checked against reserved names in `doit()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; a default is applied in `doit()` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker-trait implementation: lets generic client code treat this type as a call builder.
impl<'a> client::CallBuilder for ProjectSiteReleaseListCall<'a> {}
impl<'a> ProjectSiteReleaseListCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Assembles the query string, expands the `{+parent}` URI template, obtains
    /// an OAuth token for the configured scopes and issues the GET request,
    /// retrying when the delegate asks for it. On success returns the raw
    /// response together with the decoded `ListReleasesResponse`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListReleasesResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.releases.list",
                               http_method: hyper::Method::GET });
        // Collect path + query parameters; capacity covers known params plus extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reject user-supplied additional params that clash with reserved names.
        for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/releases";
        // Apply the default read-only scope when none was chosen explicitly.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the reserved-expansion template `{+parent}` with the percent-encoded value.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template; drop it from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request loop: repeated on recoverable token errors or delegate-requested retries.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure; the delegate may request a delayed retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The site or channel for which to list releases, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteReleaseListCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// A token from a previous call to `releases.list` or `channels.releases.list` that tells the server where to resume listing.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectSiteReleaseListCall<'a> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// The maximum number of releases to return. The service may return a lower number if fewer releases exist than this maximum number. If unspecified, defaults to 100.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectSiteReleaseListCall<'a> {
        self._page_size = Some(new_value);
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteReleaseListCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteReleaseListCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// NOTE(review): despite the statement below, passing `None` is a no-op in the
    /// current implementation — it does not clear previously added scopes.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteReleaseListCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Lists the remaining files to be uploaded for the specified version.
///
/// A builder for the *sites.versions.files.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_versions_files_list("parent")
/// .status("sed")
/// .page_token("no")
/// .page_size(-15)
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteVersionFileListCall<'a>
    where {
    // Reference to the API hub holding the shared HTTP client and authenticator.
    hub: &'a FirebaseHosting<>,
    // Required `parent` path parameter: the version whose files are listed.
    _parent: String,
    // Optional `status` query parameter: the type of files to list.
    _status: Option<String>,
    // Optional `pageToken` query parameter for resuming a previous listing.
    _page_token: Option<String>,
    // Optional `pageSize` query parameter limiting the number of results.
    _page_size: Option<i32>,
    // Optional delegate consulted for progress reporting and retry decisions.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters; checked against reserved names in `doit()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; a default is applied in `doit()` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker-trait implementation: lets generic client code treat this type as a call builder.
impl<'a> client::CallBuilder for ProjectSiteVersionFileListCall<'a> {}
impl<'a> ProjectSiteVersionFileListCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Assembles the query string, expands the `{+parent}` URI template, obtains
    /// an OAuth token for the configured scopes and issues the GET request,
    /// retrying when the delegate asks for it. On success returns the raw
    /// response together with the decoded `ListVersionFilesResponse`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListVersionFilesResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.versions.files.list",
                               http_method: hyper::Method::GET });
        // Collect path + query parameters; capacity covers known params plus extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._status {
            params.push(("status", value.to_string()));
        }
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reject user-supplied additional params that clash with reserved names.
        for &field in ["alt", "parent", "status", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/files";
        // Apply the default read-only scope when none was chosen explicitly.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the reserved-expansion template `{+parent}` with the percent-encoded value.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template; drop it from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request loop: repeated on recoverable token errors or delegate-requested retries.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure; the delegate may request a delayed retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The version for which to list files, in the format: sites/SITE_ID /versions/VERSION_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteVersionFileListCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The type of files that should be listed for the specified version.
    ///
    /// Sets the *status* query property to the given value.
    pub fn status(mut self, new_value: &str) -> ProjectSiteVersionFileListCall<'a> {
        self._status = Some(new_value.to_string());
        self
    }
    /// A token from a previous call to `ListVersionFiles` that tells the server where to resume listing.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectSiteVersionFileListCall<'a> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// The maximum number of version files to return. The service may return a lower number if fewer version files exist than this maximum number. If unspecified, defaults to 1000.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectSiteVersionFileListCall<'a> {
        self._page_size = Some(new_value);
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteVersionFileListCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteVersionFileListCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// NOTE(review): despite the statement below, passing `None` is a no-op in the
    /// current implementation — it does not clear previously added scopes.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteVersionFileListCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Creates a new version on the specified target site using the content of the specified version.
///
/// A builder for the *sites.versions.clone* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::CloneVersionRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = CloneVersionRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_versions_clone(req, "parent")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteVersionCloneCall<'a>
    where {
    // Reference to the API hub holding the shared HTTP client and authenticator.
    hub: &'a FirebaseHosting<>,
    // JSON-serialized as the POST request body in `doit()`.
    _request: CloneVersionRequest,
    // Required `parent` path parameter: the target site for the cloned version.
    _parent: String,
    // Optional delegate consulted for progress reporting and retry decisions.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters; checked against reserved names in `doit()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; a default is applied in `doit()` when empty.
    _scopes: BTreeMap<String, ()>
}
// Marker-trait implementation: lets generic client code treat this type as a call builder.
impl<'a> client::CallBuilder for ProjectSiteVersionCloneCall<'a> {}
impl<'a> ProjectSiteVersionCloneCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Serializes `_request` to JSON, expands the `{+parent}` URI template,
    /// obtains an OAuth token for the configured scopes and issues the POST
    /// request, retrying when the delegate asks for it. On success returns the
    /// raw response together with the decoded long-running `Operation`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.versions.clone",
                               http_method: hyper::Method::POST });
        // Collect path + query parameters; capacity covers known params plus extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        // Reject user-supplied additional params that clash with reserved names.
        for &field in ["alt", "parent"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/versions:clone";
        // Apply the default cloud-platform scope when none was chosen explicitly.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the reserved-expansion template `{+parent}` with the percent-encoded value.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template; drop it from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once, with JSON nulls stripped; the cursor is
        // rewound on every retry so the same bytes can be re-sent.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        // Seek to the end to learn the body length, then rewind for sending.
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request loop: repeated on recoverable token errors or delegate-requested retries.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure; the delegate may request a delayed retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: CloneVersionRequest) -> ProjectSiteVersionCloneCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The target site for the cloned version, in the format: sites/ SITE_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteVersionCloneCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteVersionCloneCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteVersionCloneCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// NOTE(review): despite the statement below, passing `None` is a no-op in the
    /// current implementation — it does not clear previously added scopes.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteVersionCloneCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Creates a new version for the specified site.
///
/// A builder for the *sites.versions.create* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Version;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Version::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_versions_create(req, "parent")
/// .version_id("sed")
/// .size_bytes("et")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteVersionCreateCall<'a>
    where {
    // Shared hub providing the HTTP client, authenticator, base URL and user agent.
    hub: &'a FirebaseHosting<>,
    // JSON request body (`Version`) serialized and POSTed by `doit()`.
    _request: Version,
    // Required `parent` path parameter: the site in which to create the version.
    _parent: String,
    // Optional `versionId` query parameter (legacy; normally left unset).
    _version_id: Option<String>,
    // Optional `sizeBytes` query parameter for a pre-emptive quota check.
    _size_bytes: Option<String>,
    // Optional delegate consulted for progress, retries and error handling.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query-string parameters set via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; used as an ordered set (values are `()`).
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteVersionCreateCall<'a> {}
impl<'a> ProjectSiteVersionCreateCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Serializes the `Version` request body, POSTs it to
    /// `v1beta1/{+parent}/versions`, and decodes the response into a
    /// `Version`. Retries are driven by the configured delegate.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Version)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if any, otherwise a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.versions.create",
                                       http_method: hyper::Method::POST });
        // Assemble all known query/path parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._version_id {
            params.push(("versionId", value.to_string()));
        }
        if let Some(value) = self._size_bytes {
            params.push(("sizeBytes", value.to_string()));
        }
        // Reject additional params that clash with reserved/known parameter names.
        for &field in ["alt", "parent", "versionId", "sizeBytes"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/versions";
        // Fall back to the default scope if the caller specified none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI template; `+` variables are percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by template expansion; drop them from the query.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body to JSON, stripping nulls, into a seekable cursor
        // so it can be re-sent on retries.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: the delegate decides whether to retry on failures.
        loop {
            // Obtain an access token for the selected scopes; the delegate may
            // supply a replacement token on failure.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind the body for (re-)transmission.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: retry if the delegate asks for it.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error, consult
                        // the delegate for a retry, otherwise surface the failure.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Version) -> ProjectSiteVersionCreateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The site in which to create the version, in the format: sites/ SITE_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteVersionCreateCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// A unique id for the new version. This is was only specified for legacy version creations, and should be blank.
    ///
    /// Sets the *version id* query property to the given value.
    pub fn version_id(mut self, new_value: &str) -> ProjectSiteVersionCreateCall<'a> {
        self._version_id = Some(new_value.to_string());
        self
    }
    /// The self-reported size of the version. This value is used for a pre-emptive quota check for legacy version uploads.
    ///
    /// Sets the *size bytes* query property to the given value.
    pub fn size_bytes(mut self, new_value: &str) -> ProjectSiteVersionCreateCall<'a> {
        self._size_bytes = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteVersionCreateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteVersionCreateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteVersionCreateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        // NOTE(review): contrary to the doc above, `None` currently leaves the
        // scope set unchanged rather than clearing it — confirm intended behavior.
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Deletes the specified version.
///
/// A builder for the *sites.versions.delete* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_versions_delete("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteVersionDeleteCall<'a>
    where {
    // Shared hub providing the HTTP client, authenticator, base URL and user agent.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter: fully-qualified version resource name.
    _name: String,
    // Optional delegate consulted for progress, retries and error handling.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query-string parameters set via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; used as an ordered set (values are `()`).
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteVersionDeleteCall<'a> {}
impl<'a> ProjectSiteVersionDeleteCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Sends a DELETE to `v1beta1/{+name}` with no body and decodes the
    /// response into `Empty`. Retries are driven by the configured delegate.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if any, otherwise a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.versions.delete",
                                       http_method: hyper::Method::DELETE });
        // Assemble all known query/path parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject additional params that clash with reserved/known parameter names.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Fall back to the default scope if the caller specified none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+name}` URI template; `+` variables are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by template expansion; drop them from the query.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request/retry loop: the delegate decides whether to retry on failures.
        loop {
            // Obtain an access token for the selected scopes; the delegate may
            // supply a replacement token on failure.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: retry if the delegate asks for it.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error, consult
                        // the delegate for a retry, otherwise surface the failure.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The fully-qualified resource name for the version, in the format: sites/SITE_ID/versions/VERSION_ID
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteVersionDeleteCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteVersionDeleteCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteVersionDeleteCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteVersionDeleteCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        // NOTE(review): contrary to the doc above, `None` currently leaves the
        // scope set unchanged rather than clearing it — confirm intended behavior.
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Lists the versions that have been created for the specified site. This list includes versions for both the default `live` channel and any active preview channels for the specified site.
///
/// A builder for the *sites.versions.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_versions_list("parent")
/// .page_token("erat")
/// .page_size(-93)
/// .filter("duo")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteVersionListCall<'a>
    where {
    // Shared hub providing the HTTP client, authenticator, base URL and user agent.
    hub: &'a FirebaseHosting<>,
    // Required `parent` path parameter: the site or channel whose versions are listed.
    _parent: String,
    // Optional `pageToken` query parameter for resuming a previous listing.
    _page_token: Option<String>,
    // Optional `pageSize` query parameter (maximum number of versions to return).
    _page_size: Option<i32>,
    // Optional `filter` query parameter (AIP-160 filter string).
    _filter: Option<String>,
    // Optional delegate consulted for progress, retries and error handling.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query-string parameters set via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; used as an ordered set (values are `()`).
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteVersionListCall<'a> {}
impl<'a> ProjectSiteVersionListCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Sends a GET to `v1beta1/{+parent}/versions` with the configured paging
    /// and filter parameters and decodes the response into a
    /// `ListVersionsResponse`. Retries are driven by the configured delegate.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListVersionsResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if any, otherwise a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.versions.list",
                                       http_method: hyper::Method::GET });
        // Assemble all known query/path parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        if let Some(value) = self._filter {
            params.push(("filter", value.to_string()));
        }
        // Reject additional params that clash with reserved/known parameter names.
        for &field in ["alt", "parent", "pageToken", "pageSize", "filter"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/versions";
        // Fall back to the read-only default scope if the caller specified none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI template; `+` variables are percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by template expansion; drop them from the query.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request/retry loop: the delegate decides whether to retry on failures.
        loop {
            // Obtain an access token for the selected scopes; the delegate may
            // supply a replacement token on failure.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: retry if the delegate asks for it.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: try to decode a structured server error, consult
                        // the delegate for a retry, otherwise surface the failure.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The site or channel for which to list versions, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteVersionListCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// A token from a previous call to `ListVersions` that tells the server where to resume listing.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectSiteVersionListCall<'a> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// The maximum number of versions to return. The service may return a lower number if fewer versions exist than this maximum number. If unspecified, defaults to 25. The maximum value is 100; values above 100 will be coerced to 100.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectSiteVersionListCall<'a> {
        self._page_size = Some(new_value);
        self
    }
    /// A filter string used to return a subset of versions in the response. The currently supported fields for filtering are: `name`, `status`, and `create_time`. Learn more about filtering in Google's [AIP 160 standard](https://google.aip.dev/160).
    ///
    /// Sets the *filter* query property to the given value.
    pub fn filter(mut self, new_value: &str) -> ProjectSiteVersionListCall<'a> {
        self._filter = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteVersionListCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteVersionListCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteVersionListCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        // NOTE(review): contrary to the doc above, `None` currently leaves the
        // scope set unchanged rather than clearing it — confirm intended behavior.
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Updates the specified metadata for the specified version. This method will fail with `FAILED_PRECONDITION` in the event of an invalid state transition. The supported [state](../sites.versions#versionstatus) transitions for a version are from `CREATED` to `FINALIZED`. Use [`DeleteVersion`](delete) to set the status of a version to `DELETED`.
///
/// A builder for the *sites.versions.patch* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Version;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Version::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_versions_patch(req, "name")
/// .update_mask("et")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteVersionPatchCall<'a>
    where {
    // Shared hub that owns the HTTP client, authenticator, base URL and user agent.
    hub: &'a FirebaseHosting<>,
    // JSON request body sent with the PATCH call.
    _request: Version,
    // Required `name` path parameter (sites/SITE_ID/versions/VERSION_ID).
    _name: String,
    // Optional `updateMask` query parameter.
    _update_mask: Option<String>,
    // Optional delegate consulted before/during/after the request.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters (see `param()`).
    _additional_params: HashMap<String, String>,
    // OAuth scopes to authorize with; `doit()` inserts CloudPlatform when empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteVersionPatchCall<'a> {}
impl<'a> ProjectSiteVersionPatchCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Issues a `PATCH` to `v1beta1/{+name}` with `self._request` serialized as
    /// JSON, and decodes the JSON response into a `Version`. The configured
    /// delegate may request retries after token or HTTP failures.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Version)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or fall back to a default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.versions.patch",
                               http_method: hyper::Method::PATCH });
        // Collect path/query parameters; user-supplied additional params must
        // not clash with the parameters this builder manages itself.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        if let Some(value) = self._update_mask {
            params.push(("updateMask", value.to_string()));
        }
        for &field in ["alt", "name", "updateMask"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Default to the CloudPlatform scope when the caller set none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+name}` URI template; `+` expansions are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters are baked into the URL now; drop them from the list
        // so they do not also appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once (stripping JSON nulls) so the same
        // bytes can be re-sent on every retry iteration.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: the delegate drives retries via client::Retry::After.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try both known server-error shapes before giving up.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// The `Version` to send as the JSON request body.
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Version) -> ProjectSiteVersionPatchCall<'a> {
        self._request = new_value;
        self
    }
    /// The fully-qualified resource name for the version, in the format: sites/ SITE_ID/versions/VERSION_ID This name is provided in the response body when you call [`CreateVersion`](sites.versions/create).
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteVersionPatchCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// A set of field names from your [version](../sites.versions) that you want to update. A field will be overwritten if, and only if, it's in the mask. If a mask is not provided then a default mask of only [`status`](../sites.versions#Version.FIELDS.status) will be used.
    ///
    /// Sets the *update mask* query property to the given value.
    pub fn update_mask(mut self, new_value: &str) -> ProjectSiteVersionPatchCall<'a> {
        self._update_mask = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteVersionPatchCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteVersionPatchCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteVersionPatchCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Adds content files to the specified version. Each file must be under 2 GB.
///
/// A builder for the *sites.versions.populateFiles* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::PopulateVersionFilesRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = PopulateVersionFilesRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_versions_populate_files(req, "parent")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteVersionPopulateFileCall<'a>
    where {
    // Shared hub that owns the HTTP client, authenticator, base URL and user agent.
    hub: &'a FirebaseHosting<>,
    // JSON request body sent with the POST call.
    _request: PopulateVersionFilesRequest,
    // Required `parent` path parameter (sites/SITE_ID/versions/VERSION_ID).
    _parent: String,
    // Optional delegate consulted before/during/after the request.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters (see `param()`).
    _additional_params: HashMap<String, String>,
    // OAuth scopes to authorize with; `doit()` inserts CloudPlatform when empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteVersionPopulateFileCall<'a> {}
impl<'a> ProjectSiteVersionPopulateFileCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Issues a `POST` to `v1beta1/{+parent}:populateFiles` with
    /// `self._request` serialized as JSON, and decodes the JSON response into
    /// a `PopulateVersionFilesResponse`. The configured delegate may request
    /// retries after token or HTTP failures.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, PopulateVersionFilesResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or fall back to a default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.versions.populateFiles",
                               http_method: hyper::Method::POST });
        // Collect path/query parameters; user-supplied additional params must
        // not clash with the parameters this builder manages itself.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        for &field in ["alt", "parent"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}:populateFiles";
        // Default to the CloudPlatform scope when the caller set none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI template; `+` expansions are percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters are baked into the URL now; drop them from the list
        // so they do not also appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once (stripping JSON nulls) so the same
        // bytes can be re-sent on every retry iteration.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: the delegate drives retries via client::Retry::After.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try both known server-error shapes before giving up.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// The `PopulateVersionFilesRequest` to send as the JSON request body.
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: PopulateVersionFilesRequest) -> ProjectSiteVersionPopulateFileCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The version to which to add files, in the format: sites/SITE_ID /versions/VERSION_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteVersionPopulateFileCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteVersionPopulateFileCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteVersionPopulateFileCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteVersionPopulateFileCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Creates a new Hosting Site in the specified parent Firebase project. Note that Hosting sites can take several minutes to propagate through Firebase systems.
///
/// A builder for the *sites.create* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Site;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Site::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_create(req, "parent")
/// .site_id("consetetur")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteCreateCall<'a>
    where {
    // Shared hub that owns the HTTP client, authenticator, base URL and user agent.
    hub: &'a FirebaseHosting<>,
    // JSON request body sent with the POST call.
    _request: Site,
    // Required `parent` path parameter (projects/PROJECT_IDENTIFIER).
    _parent: String,
    // Optional `siteId` query parameter.
    _site_id: Option<String>,
    // Optional delegate consulted before/during/after the request.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters (see `param()`).
    _additional_params: HashMap<String, String>,
    // OAuth scopes to authorize with; `doit()` inserts CloudPlatform when empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteCreateCall<'a> {}
impl<'a> ProjectSiteCreateCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Issues a `POST` to `v1beta1/{+parent}/sites` with `self._request`
    /// serialized as JSON, and decodes the JSON response into a `Site`. The
    /// configured delegate may request retries after token or HTTP failures.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Site)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or fall back to a default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.create",
                               http_method: hyper::Method::POST });
        // Collect path/query parameters; user-supplied additional params must
        // not clash with the parameters this builder manages itself.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._site_id {
            params.push(("siteId", value.to_string()));
        }
        for &field in ["alt", "parent", "siteId"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/sites";
        // Default to the CloudPlatform scope when the caller set none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI template; `+` expansions are percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters are baked into the URL now; drop them from the list
        // so they do not also appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once (stripping JSON nulls) so the same
        // bytes can be re-sent on every retry iteration.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: the delegate drives retries via client::Retry::After.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try both known server-error shapes before giving up.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// The `Site` to send as the JSON request body.
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Site) -> ProjectSiteCreateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The Firebase project in which to create a Hosting site, in the format: projects/PROJECT_IDENTIFIER Refer to the `Site` [`name`](../projects#Site.FIELDS.name) field for details about PROJECT_IDENTIFIER values.
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectSiteCreateCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// Required. Immutable. A globally unique identifier for the Hosting site. This identifier is used to construct the Firebase-provisioned subdomains for the site, so it must also be a valid domain name label.
    ///
    /// Sets the *site id* query property to the given value.
    pub fn site_id(mut self, new_value: &str) -> ProjectSiteCreateCall<'a> {
        self._site_id = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteCreateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteCreateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteCreateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Deletes the specified Hosting Site from the specified parent Firebase project.
///
/// A builder for the *sites.delete* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_delete("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteDeleteCall<'a>
    where {
    // Shared hub giving access to the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter: the fully-qualified site resource name.
    _name: String,
    // Optional caller-supplied delegate consulted during `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters set via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; `doit()` inserts a default when this is empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteDeleteCall<'a> {}
impl<'a> ProjectSiteDeleteCall<'a> {
    /// Perform the operation you have build so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if present, otherwise a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.delete",
                               http_method: hyper::Method::DELETE });
        // Assemble request parameters: the `name` path parameter plus any
        // user-supplied additional query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject additional params that would clash with reserved/known fields.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        // Responses are always requested as JSON.
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Apply the default scope when the caller did not pick one.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+name}` URI template; `+` templates get percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it so it
        // is not sent again as a query parameter.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request loop: retried when the delegate asks for it on auth or HTTP failure.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Give the delegate a chance to supply a token manually.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: optionally retry after a delegate-chosen delay.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx status: try to decode the server error payload
                        // in the known formats before reporting failure.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the response type.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The fully-qualified resource name for the Hosting site, in the format: projects/PROJECT_IDENTIFIER/sites/SITE_ID Refer to the `Site` [`name`](../projects#Site.FIELDS.name) field for details about PROJECT_IDENTIFIER values.
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteDeleteCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteDeleteCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteDeleteCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteDeleteCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        // NOTE(review): `None` currently leaves existing scopes untouched,
        // despite the doc above — confirm intended behavior.
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Gets the specified Hosting Site.
///
/// A builder for the *sites.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_get("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteGetCall<'a>
    where {
    // Shared hub giving access to the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter: the fully-qualified site resource name.
    _name: String,
    // Optional caller-supplied delegate consulted during `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters set via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; `doit()` inserts a default when this is empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteGetCall<'a> {}
impl<'a> ProjectSiteGetCall<'a> {
    /// Perform the operation you have build so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Site)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if present, otherwise a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.get",
                               http_method: hyper::Method::GET });
        // Assemble request parameters: the `name` path parameter plus any
        // user-supplied additional query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject additional params that would clash with reserved/known fields.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        // Responses are always requested as JSON.
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Apply the default (read-only) scope when the caller did not pick one.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the `{+name}` URI template; `+` templates get percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it so it
        // is not sent again as a query parameter.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request loop: retried when the delegate asks for it on auth or HTTP failure.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Give the delegate a chance to supply a token manually.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: optionally retry after a delegate-chosen delay.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx status: try to decode the server error payload
                        // in the known formats before reporting failure.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the response type.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The fully-qualified resource name for the Hosting site, in the format: projects/PROJECT_IDENTIFIER/sites/SITE_ID Refer to the `Site` [`name`](../projects#Site.FIELDS.name) field for details about PROJECT_IDENTIFIER values. Since a SITE_ID is a globally unique identifier, you can also use the unique sub-collection resource access pattern, in the format: projects/-/sites/SITE_ID
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteGetCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteGetCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteGetCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteGetCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        // NOTE(review): `None` currently leaves existing scopes untouched,
        // despite the doc above — confirm intended behavior.
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Gets the Hosting metadata for a specific site.
///
/// A builder for the *sites.getConfig* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_get_config("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteGetConfigCall<'a>
    where {
    // Shared hub giving access to the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter: the site config resource name.
    _name: String,
    // Optional caller-supplied delegate consulted during `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters set via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; `doit()` inserts a default when this is empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteGetConfigCall<'a> {}
impl<'a> ProjectSiteGetConfigCall<'a> {
    /// Perform the operation you have build so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, SiteConfig)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if present, otherwise a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.getConfig",
                               http_method: hyper::Method::GET });
        // Assemble request parameters: the `name` path parameter plus any
        // user-supplied additional query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject additional params that would clash with reserved/known fields.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        // Responses are always requested as JSON.
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Apply the default (read-only) scope when the caller did not pick one.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the `{+name}` URI template; `+` templates get percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it so it
        // is not sent again as a query parameter.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request loop: retried when the delegate asks for it on auth or HTTP failure.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Give the delegate a chance to supply a token manually.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: optionally retry after a delegate-chosen delay.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx status: try to decode the server error payload
                        // in the known formats before reporting failure.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the response type.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The site for which to get the SiteConfig, in the format: sites/ site-name/config
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteGetConfigCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteGetConfigCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteGetConfigCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteGetConfigCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        // NOTE(review): `None` currently leaves existing scopes untouched,
        // despite the doc above — confirm intended behavior.
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Lists each Hosting Site associated with the specified parent Firebase project.
///
/// A builder for the *sites.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_list("parent")
/// .page_token("sadipscing")
/// .page_size(-15)
/// .doit().await;
/// # }
/// ```
pub struct ProjectSiteListCall<'a>
    where {
    // Shared hub giving access to the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `parent` path parameter (the Firebase project).
    _parent: String,
    // Optional `pageToken` query parameter for resuming a previous listing.
    _page_token: Option<String>,
    // Optional `pageSize` query parameter; only sent when set.
    _page_size: Option<i32>,
    // Optional caller-supplied delegate consulted during `doit()`.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form extra query parameters set via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; `doit()` inserts a default when this is empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectSiteListCall<'a> {}
impl<'a> ProjectSiteListCall<'a> {
/// Perform the operation you have build so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListSitesResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if present, otherwise a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.list",
                               http_method: hyper::Method::GET });
        // Assemble request parameters: the required `parent` path parameter,
        // the optional paging parameters, and any additional query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reject additional params that would clash with reserved/known fields.
        for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        // Responses are always requested as JSON.
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/sites";
        // Apply the default (read-only) scope when the caller did not pick one.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI template; `+` templates get percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it so it
        // is not sent again as a query parameter.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request loop: retried when the delegate asks for it on auth or HTTP failure.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Give the delegate a chance to supply a token manually.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: optionally retry after a delegate-chosen delay.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx status: try to decode the server error payload
                        // in the known formats before reporting failure.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the response type.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
/// Required. The Firebase project for which to list sites, in the format: projects/PROJECT_IDENTIFIER Refer to the `Site` [`name`](../projects#Site.FIELDS.name) field for details about PROJECT_IDENTIFIER values.
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> ProjectSiteListCall<'a> {
self._parent = new_value.to_string();
self
}
/// Optional. A token from a previous call to `ListSites` that tells the server where to resume listing.
///
/// Sets the *page token* query property to the given value.
pub fn page_token(mut self, new_value: &str) -> ProjectSiteListCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// Optional. The maximum number of sites to return. The service may return a lower number if fewer sites exist than this maximum number. If unspecified, defaults to 40.
///
/// Sets the *page size* query property to the given value.
pub fn page_size(mut self, new_value: i32) -> ProjectSiteListCall<'a> {
self._page_size = Some(new_value);
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteListCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteListCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::FirebaseReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteListCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Updates attributes of the specified Hosting Site.
///
/// A builder for the *sites.patch* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Site;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Site::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_patch(req, "name")
///              .update_mask("duo")
///              .doit().await;
/// # }
/// ```
pub struct ProjectSitePatchCall<'a>
    where {
    hub: &'a FirebaseHosting<>,                         // shared hub: HTTP client + authenticator + base URL
    _request: Site,                                     // request body, serialized to JSON by doit()
    _name: String,                                      // required `name` path parameter
    _update_mask: Option<String>,                       // optional `updateMask` query parameter
    _delegate: Option<&'a mut dyn client::Delegate>,    // optional progress/retry hook
    _additional_params: HashMap<String, String>,        // free-form extra query parameters (see param())
    _scopes: BTreeMap<String, ()>                       // OAuth scopes, kept as an ordered set
}
// Marker trait implementation: tags this type as a call builder.
impl<'a> client::CallBuilder for ProjectSitePatchCall<'a> {}
impl<'a> ProjectSitePatchCall<'a> {
    /// Perform the operation you have build so far.
    ///
    /// Assembles path/query parameters, serializes the `Site` request body to
    /// JSON, then loops: obtain an OAuth token, issue the PATCH request, and
    /// either retry (when the delegate asks for it) or return the decoded
    /// `Site` together with the raw HTTP response.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Site)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or fall back to the default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.patch",
                                       http_method: hyper::Method::PATCH });
        // Known parameters first; capacity leaves room for caller-set extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        if let Some(value) = self._update_mask {
            params.push(("updateMask", value.to_string()));
        }
        // Additional params must not clash with reserved parameter names.
        for &field in ["alt", "name", "updateMask"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // No scope chosen explicitly: use this method's default scope.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template variable; `{+...}` expansions are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it so it
        // does not also appear as a query parameter.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once (stripping JSON nulls); the cursor
        // is rewound before every retry below so the body can be re-sent.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: iterates only when the delegate requests a retry
        // after a transport error or a non-success HTTP status.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Let the delegate supply a fallback token; otherwise abort.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate may ask for a retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx status: decode a structured server error if
                        // possible so the delegate can decide about retrying.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Site) -> ProjectSitePatchCall<'a> {
        self._request = new_value;
        self
    }
    /// Output only. The fully-qualified resource name of the Hosting site, in the format: projects/PROJECT_IDENTIFIER/sites/SITE_ID PROJECT_IDENTIFIER: the Firebase project's [`ProjectNumber`](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects#FirebaseProject.FIELDS.project_number) ***(recommended)*** or its [`ProjectId`](https://firebase.google.com/docs/projects/api/reference/rest/v1beta1/projects#FirebaseProject.FIELDS.project_id). Learn more about using project identifiers in Google's [AIP 2510 standard](https://google.aip.dev/cloud/2510).
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSitePatchCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// A set of field names from your Site that you want to update.
    ///
    /// Sets the *update mask* query property to the given value.
    pub fn update_mask(mut self, new_value: &str) -> ProjectSitePatchCall<'a> {
        self._update_mask = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSitePatchCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSitePatchCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSitePatchCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Sets the Hosting metadata for a specific site.
///
/// A builder for the *sites.updateConfig* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::SiteConfig;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = SiteConfig::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().sites_update_config(req, "name")
///              .update_mask("vero")
///              .doit().await;
/// # }
/// ```
pub struct ProjectSiteUpdateConfigCall<'a>
    where {
    hub: &'a FirebaseHosting<>,                         // shared hub: HTTP client + authenticator + base URL
    _request: SiteConfig,                               // request body, serialized to JSON by doit()
    _name: String,                                      // required `name` path parameter
    _update_mask: Option<String>,                       // optional `updateMask` query parameter
    _delegate: Option<&'a mut dyn client::Delegate>,    // optional progress/retry hook
    _additional_params: HashMap<String, String>,        // free-form extra query parameters (see param())
    _scopes: BTreeMap<String, ()>                       // OAuth scopes, kept as an ordered set
}
// Marker trait implementation: tags this type as a call builder.
impl<'a> client::CallBuilder for ProjectSiteUpdateConfigCall<'a> {}
impl<'a> ProjectSiteUpdateConfigCall<'a> {
    /// Perform the operation you have build so far.
    ///
    /// Assembles path/query parameters, serializes the `SiteConfig` request
    /// body to JSON, then loops: obtain an OAuth token, issue the PATCH
    /// request, and either retry (when the delegate asks for it) or return
    /// the decoded `SiteConfig` together with the raw HTTP response.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, SiteConfig)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or fall back to the default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.projects.sites.updateConfig",
                                       http_method: hyper::Method::PATCH });
        // Known parameters first; capacity leaves room for caller-set extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        if let Some(value) = self._update_mask {
            params.push(("updateMask", value.to_string()));
        }
        // Additional params must not clash with reserved parameter names.
        for &field in ["alt", "name", "updateMask"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // No scope chosen explicitly: use this method's default scope.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template variable; `{+...}` expansions are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it so it
        // does not also appear as a query parameter.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once (stripping JSON nulls); the cursor
        // is rewound before every retry below so the body can be re-sent.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: iterates only when the delegate requests a retry
        // after a transport error or a non-success HTTP status.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Let the delegate supply a fallback token; otherwise abort.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate may ask for a retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx status: decode a structured server error if
                        // possible so the delegate can decide about retrying.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: SiteConfig) -> ProjectSiteUpdateConfigCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The site for which to update the SiteConfig, in the format: sites/ site-name/config
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectSiteUpdateConfigCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// A set of field names from your [site configuration](../sites.SiteConfig) that you want to update. A field will be overwritten if, and only if, it's in the mask. If a mask is not provided then a default mask of only [`max_versions`](../sites.SiteConfig.max_versions) will be used.
    ///
    /// Sets the *update mask* query property to the given value.
    pub fn update_mask(mut self, new_value: &str) -> ProjectSiteUpdateConfigCall<'a> {
        self._update_mask = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectSiteUpdateConfigCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectSiteUpdateConfigCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectSiteUpdateConfigCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Creates a new release, which makes the content of the specified version actively display on the appropriate URL(s).
///
/// A builder for the *channels.releases.create* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Release;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Release::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().channels_releases_create(req, "parent")
///              .version_name("Stet")
///              .doit().await;
/// # }
/// ```
pub struct SiteChannelReleaseCreateCall<'a>
    where {
    hub: &'a FirebaseHosting<>,                         // shared hub: HTTP client + authenticator + base URL
    _request: Release,                                  // request body, serialized to JSON by doit()
    _parent: String,                                    // required `parent` path parameter
    _version_name: Option<String>,                      // optional `versionName` query parameter
    _delegate: Option<&'a mut dyn client::Delegate>,    // optional progress/retry hook
    _additional_params: HashMap<String, String>,        // free-form extra query parameters (see param())
    _scopes: BTreeMap<String, ()>                       // OAuth scopes, kept as an ordered set
}
// Marker trait implementation: tags this type as a call builder.
impl<'a> client::CallBuilder for SiteChannelReleaseCreateCall<'a> {}
impl<'a> SiteChannelReleaseCreateCall<'a> {
    /// Perform the operation you have build so far.
    ///
    /// Assembles path/query parameters, serializes the `Release` request body
    /// to JSON, then loops: obtain an OAuth token, issue the POST request to
    /// the parent's `/releases` collection, and either retry (when the
    /// delegate asks for it) or return the decoded `Release` together with
    /// the raw HTTP response.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Release)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or fall back to the default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.channels.releases.create",
                                       http_method: hyper::Method::POST });
        // Known parameters first; capacity leaves room for caller-set extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._version_name {
            params.push(("versionName", value.to_string()));
        }
        // Additional params must not clash with reserved parameter names.
        for &field in ["alt", "parent", "versionName"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/releases";
        // No scope chosen explicitly: use this method's default scope.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template variable; `{+...}` expansions are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it so it
        // does not also appear as a query parameter.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once (stripping JSON nulls); the cursor
        // is rewound before every retry below so the body can be re-sent.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: iterates only when the delegate requests a retry
        // after a transport error or a non-success HTTP status.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Let the delegate supply a fallback token; otherwise abort.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate may ask for a retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx status: decode a structured server error if
                        // possible so the delegate can decide about retrying.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
///
/// Sets the *request* property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: Release) -> SiteChannelReleaseCreateCall<'a> {
self._request = new_value;
self
}
/// Required. The site or channel to which the release belongs, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> SiteChannelReleaseCreateCall<'a> {
self._parent = new_value.to_string();
self
}
/// The unique identifier for a version, in the format: sites/SITE_ID/versions/ VERSION_ID The SITE_ID in this version identifier must match the SITE_ID in the `parent` parameter. This query parameter must be empty if the `type` field in the request body is `SITE_DISABLE`.
///
/// Sets the *version name* query property to the given value.
pub fn version_name(mut self, new_value: &str) -> SiteChannelReleaseCreateCall<'a> {
self._version_name = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteChannelReleaseCreateCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteChannelReleaseCreateCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteChannelReleaseCreateCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Lists the releases that have been created for the specified site or channel. When used to list releases for a site, this list includes releases for both the default `live` channel and any active preview channels for the specified site.
///
/// A builder for the *channels.releases.list* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().channels_releases_list("parent")
/// .page_token("elitr")
/// .page_size(-6)
/// .doit().await;
/// # }
/// ```
pub struct SiteChannelReleaseListCall<'a>
    where {
    // Hub providing the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `parent` path parameter (site or channel to list releases of).
    _parent: String,
    // Optional `pageToken` query parameter to resume a previous listing.
    _page_token: Option<String>,
    // Optional `pageSize` query parameter limiting the number of results.
    _page_size: Option<i32>,
    // Optional delegate consulted for progress reporting and retry decisions.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters without dedicated setters (see `param()`).
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; empty means the default scope.
    _scopes: BTreeMap<String, ()>
}
// Marker trait: identifies this type as a request builder.
impl<'a> client::CallBuilder for SiteChannelReleaseListCall<'a> {}
impl<'a> SiteChannelReleaseListCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Expands the `{+parent}` URL template, obtains an OAuth token for the
    /// configured scopes, issues the GET request and JSON-decodes the
    /// response, retrying whenever the delegate requests it.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListReleasesResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or a throw-away default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.channels.releases.list",
                               http_method: hyper::Method::GET });
        // Collect known path/query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reserved names must not also appear among the additional params.
        for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/releases";
        if self._scopes.len() == 0 {
            // No explicit scope requested: listing only needs read-only access.
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Substitute the {+parent} template with the (percent-encoded) value.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters must not be repeated in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Retry loop: the delegate decides whether failed attempts are retried.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try to decode a structured server error; fall back
                        // to handing the raw failure response to the caller.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The site or channel for which to list releases, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> SiteChannelReleaseListCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// A token from a previous call to `releases.list` or `channels.releases.list` that tells the server where to resume listing.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> SiteChannelReleaseListCall<'a> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// The maximum number of releases to return. The service may return a lower number if fewer releases exist than this maximum number. If unspecified, defaults to 100.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> SiteChannelReleaseListCall<'a> {
        self._page_size = Some(new_value);
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteChannelReleaseListCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteChannelReleaseListCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteChannelReleaseListCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Creates a new channel in the specified site.
///
/// A builder for the *channels.create* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Channel;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Channel::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().channels_create(req, "parent")
/// .channel_id("no")
/// .doit().await;
/// # }
/// ```
pub struct SiteChannelCreateCall<'a>
    where {
    // Hub providing the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // JSON request body sent with the POST.
    _request: Channel,
    // Required `parent` path parameter (site in which to create the channel).
    _parent: String,
    // Optional `channelId` query parameter: the new channel's unique ID.
    _channel_id: Option<String>,
    // Optional delegate consulted for progress reporting and retry decisions.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters without dedicated setters (see `param()`).
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; empty means the default scope.
    _scopes: BTreeMap<String, ()>
}
// Marker trait: identifies this type as a request builder.
impl<'a> client::CallBuilder for SiteChannelCreateCall<'a> {}
impl<'a> SiteChannelCreateCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Serializes the request body, expands the `{+parent}` URL template,
    /// obtains an OAuth token for the configured scopes, issues the POST
    /// request and JSON-decodes the response, retrying whenever the delegate
    /// requests it.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Channel)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or a throw-away default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.channels.create",
                               http_method: hyper::Method::POST });
        // Collect known path/query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._channel_id {
            params.push(("channelId", value.to_string()));
        }
        // Reserved names must not also appear among the additional params.
        for &field in ["alt", "parent", "channelId"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/channels";
        if self._scopes.len() == 0 {
            // No explicit scope requested: creation needs full access.
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Substitute the {+parent} template with the (percent-encoded) value.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters must not be repeated in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once; JSON nulls are stripped so unset
        // optional fields are not transmitted.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Retry loop: the delegate decides whether failed attempts are retried.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind the body reader so it can be re-sent on retries.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try to decode a structured server error; fall back
                        // to handing the raw failure response to the caller.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Channel) -> SiteChannelCreateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The site in which to create this channel, in the format: sites/ SITE_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> SiteChannelCreateCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// Required. Immutable. A unique ID within the site that identifies the channel.
    ///
    /// Sets the *channel id* query property to the given value.
    pub fn channel_id(mut self, new_value: &str) -> SiteChannelCreateCall<'a> {
        self._channel_id = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteChannelCreateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteChannelCreateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteChannelCreateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Deletes the specified channel of the specified site. The `live` channel cannot be deleted.
///
/// A builder for the *channels.delete* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().channels_delete("name")
/// .doit().await;
/// # }
/// ```
pub struct SiteChannelDeleteCall<'a>
    where {
    // Hub providing the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter (fully-qualified channel resource name).
    _name: String,
    // Optional delegate consulted for progress reporting and retry decisions.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters without dedicated setters (see `param()`).
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; empty means the default scope.
    _scopes: BTreeMap<String, ()>
}
// Marker trait: identifies this type as a request builder.
impl<'a> client::CallBuilder for SiteChannelDeleteCall<'a> {}
impl<'a> SiteChannelDeleteCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Expands the `{+name}` URL template, obtains an OAuth token for the
    /// configured scopes, issues the DELETE request and JSON-decodes the
    /// (empty) response, retrying whenever the delegate requests it.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, or a throw-away default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.channels.delete",
                               http_method: hyper::Method::DELETE });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reserved names must not also appear among the additional params.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        if self._scopes.len() == 0 {
            // No explicit scope requested: deletion needs full access.
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Substitute the {+name} template with the (percent-encoded) value.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters must not be repeated in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Retry loop: the delegate decides whether failed attempts are retried.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try to decode a structured server error; fall back
                        // to handing the raw failure response to the caller.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
/// Required. The fully-qualified resource name for the channel, in the format: sites/SITE_ID/channels/CHANNEL_ID
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> SiteChannelDeleteCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteChannelDeleteCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteChannelDeleteCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteChannelDeleteCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Retrieves information for the specified channel of the specified site.
///
/// A builder for the *channels.get* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().channels_get("name")
/// .doit().await;
/// # }
/// ```
pub struct SiteChannelGetCall<'a>
    where {
    // Handle to the hub that owns the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter: sites/SITE_ID/channels/CHANNEL_ID.
    _name: String,
    // Optional caller-supplied delegate consulted while the request executes.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; `doit()` inserts a default when empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteChannelGetCall<'a> {}
impl<'a> SiteChannelGetCall<'a> {
    /// Perform the operation you have build so far.
    ///
    /// Resolves the `{+name}` path template, assembles the query string, obtains an
    /// OAuth token for the configured scopes and issues the GET request, retrying
    /// when the delegate asks for it. On success returns the raw HTTP response
    /// paired with the decoded `Channel`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Channel)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        // Fall back to a default delegate when the caller did not install one.
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.channels.get",
                                       http_method: hyper::Method::GET });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject additional parameters that would clash with reserved/known ones.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Default to the read-only scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Substitute the `{+name}` template with the percent-encoded path parameter.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template; drop it from the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request/retry loop: the delegate decides whether failures are retried (via sleep+continue).
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: parse the server error, give the delegate a retry chance,
                        // otherwise surface Failure/BadRequest.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
/// Required. The fully-qualified resource name for the channel, in the format: sites/SITE_ID/channels/CHANNEL_ID
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> SiteChannelGetCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteChannelGetCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteChannelGetCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::FirebaseReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteChannelGetCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Lists the channels for the specified site. All sites have a default `live` channel.
///
/// A builder for the *channels.list* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().channels_list("parent")
/// .page_token("consetetur")
/// .page_size(-28)
/// .doit().await;
/// # }
/// ```
pub struct SiteChannelListCall<'a>
    where {
    // Handle to the hub that owns the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `parent` path parameter: sites/SITE_ID.
    _parent: String,
    // Optional `pageToken` query parameter for resuming a previous listing.
    _page_token: Option<String>,
    // Optional `pageSize` query parameter.
    _page_size: Option<i32>,
    // Optional caller-supplied delegate consulted while the request executes.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; `doit()` inserts a default when empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteChannelListCall<'a> {}
impl<'a> SiteChannelListCall<'a> {
    /// Perform the operation you have build so far.
    ///
    /// Resolves the `{+parent}` path template, assembles the query string (including
    /// optional `pageToken`/`pageSize`), obtains an OAuth token for the configured
    /// scopes and issues the GET request, retrying when the delegate asks for it.
    /// On success returns the raw HTTP response paired with the decoded
    /// `ListChannelsResponse`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListChannelsResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        // Fall back to a default delegate when the caller did not install one.
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.channels.list",
                                       http_method: hyper::Method::GET });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reject additional parameters that would clash with reserved/known ones.
        for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/channels";
        // Default to the read-only scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Substitute the `{+parent}` template with the percent-encoded path parameter.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template; drop it from the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request/retry loop: the delegate decides whether failures are retried (via sleep+continue).
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: parse the server error, give the delegate a retry chance,
                        // otherwise surface Failure/BadRequest.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
/// Required. The site for which to list channels, in the format: sites/SITE_ID
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> SiteChannelListCall<'a> {
self._parent = new_value.to_string();
self
}
/// A token from a previous call to `ListChannels` that tells the server where to resume listing.
///
/// Sets the *page token* query property to the given value.
pub fn page_token(mut self, new_value: &str) -> SiteChannelListCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// The maximum number of channels to return. The service may return a lower number if fewer channels exist than this maximum number. If unspecified, defaults to 10. The maximum value is 100; values above 100 will be coerced to 100.
///
/// Sets the *page size* query property to the given value.
pub fn page_size(mut self, new_value: i32) -> SiteChannelListCall<'a> {
self._page_size = Some(new_value);
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteChannelListCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteChannelListCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::FirebaseReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteChannelListCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Updates information for the specified channel of the specified site. Implicitly creates the channel if it doesn't already exist.
///
/// A builder for the *channels.patch* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Channel;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Channel::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().channels_patch(req, "name")
/// .update_mask("erat")
/// .doit().await;
/// # }
/// ```
pub struct SiteChannelPatchCall<'a>
    where {
    // Handle to the hub that owns the HTTP client, authenticator and base URL.
    hub: &'a FirebaseHosting<>,
    // JSON request body sent with the PATCH.
    _request: Channel,
    // Required `name` path parameter: sites/SITE_ID/channels/CHANNEL_ID.
    _name: String,
    // Optional `updateMask` query parameter (comma-separated field list).
    _update_mask: Option<String>,
    // Optional caller-supplied delegate consulted while the request executes.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Free-form query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request a token for; `doit()` inserts a default when empty.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteChannelPatchCall<'a> {}
impl<'a> SiteChannelPatchCall<'a> {
    /// Perform the operation you have build so far.
    ///
    /// Serializes the `Channel` request body to JSON (dropping null values),
    /// resolves the `{+name}` path template, obtains an OAuth token for the
    /// configured scopes and issues the PATCH request, retrying when the delegate
    /// asks for it. On success returns the raw HTTP response paired with the
    /// decoded `Channel`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Channel)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        // Fall back to a default delegate when the caller did not install one.
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.channels.patch",
                                       http_method: hyper::Method::PATCH });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        if let Some(value) = self._update_mask {
            params.push(("updateMask", value.to_string()));
        }
        // Reject additional parameters that would clash with reserved/known ones.
        for &field in ["alt", "name", "updateMask"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Default to the read-write cloud-platform scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Substitute the `{+name}` template with the percent-encoded path parameter.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template; drop it from the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once into an in-memory cursor so it can be
        // re-read on every retry iteration.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        // Seek to the end to learn the byte length (for Content-Length), then rewind.
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request/retry loop: the delegate decides whether failures are retried (via sleep+continue).
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind the body reader so a retry sends the full payload again.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                            .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: parse the server error, give the delegate a retry chance,
                        // otherwise surface Failure/BadRequest.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
///
/// Sets the *request* property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: Channel) -> SiteChannelPatchCall<'a> {
self._request = new_value;
self
}
/// The fully-qualified resource name for the channel, in the format: sites/ SITE_ID/channels/CHANNEL_ID
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> SiteChannelPatchCall<'a> {
self._name = new_value.to_string();
self
}
/// A comma-separated list of fields to be updated in this request.
///
/// Sets the *update mask* query property to the given value.
pub fn update_mask(mut self, new_value: &str) -> SiteChannelPatchCall<'a> {
self._update_mask = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteChannelPatchCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteChannelPatchCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteChannelPatchCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Creates a domain mapping on the specified site.
///
/// A builder for the *domains.create* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Domain;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Domain::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().domains_create(req, "parent")
/// .doit().await;
/// # }
/// ```
pub struct SiteDomainCreateCall<'a>
    where {
    // Shared hub that owns the HTTP client, authenticator, and base URL.
    hub: &'a FirebaseHosting<>,
    // JSON-serialized as the POST request body by `doit()`.
    _request: Domain,
    // Required `parent` path parameter, e.g. "sites/site-name".
    _parent: String,
    // Optional caller-supplied delegate; a `DefaultDelegate` is used when absent.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; empty means the default scope is inserted at call time.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteDomainCreateCall<'a> {}

impl<'a> SiteDomainCreateCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Sends `POST v1beta1/{+parent}/domains` with the configured `Domain` as the
    /// JSON body and returns the raw response together with the decoded `Domain`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Domain)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a default delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.domains.create",
                               http_method: hyper::Method::POST });
        // Collect query parameters: known params plus any user-supplied extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        // Reject user-supplied parameters that clash with reserved ones.
        for &field in ["alt", "parent"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/domains";
        // No scope configured: fall back to the method's default scope.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template; '+'-prefixed variables are percent-encoded
        // before substitution.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters are now part of the URL; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request body once; JSON nulls are stripped so absent
        // optional fields are omitted from the payload.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Retry loop: the delegate decides whether (and how long) to retry on
        // token, transport, or HTTP failures.
        loop {
            // Acquire an access token for the configured scopes; the delegate
            // may supply a replacement token when acquisition fails.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind so every retry sends the full payload from the start.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure: retry only if the delegate asks for it.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try both known server-error JSON shapes for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Domain) -> SiteDomainCreateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The parent to create the domain association for, in the format: sites/site-name
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> SiteDomainCreateCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteDomainCreateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteDomainCreateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteDomainCreateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Deletes the existing domain mapping on the specified site.
///
/// A builder for the *domains.delete* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().domains_delete("name")
/// .doit().await;
/// # }
/// ```
pub struct SiteDomainDeleteCall<'a>
    where {
    // Shared hub that owns the HTTP client, authenticator, and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter identifying the domain association to delete.
    _name: String,
    // Optional caller-supplied delegate; a `DefaultDelegate` is used when absent.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; empty means the default scope is inserted at call time.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteDomainDeleteCall<'a> {}

impl<'a> SiteDomainDeleteCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Sends `DELETE v1beta1/{+name}` with an empty body and returns the raw
    /// response together with the decoded `Empty` result.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a default delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.domains.delete",
                               http_method: hyper::Method::DELETE });
        // Collect query parameters: known params plus any user-supplied extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject user-supplied parameters that clash with reserved ones.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // No scope configured: fall back to the method's default scope.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the URI template; '+'-prefixed variables are percent-encoded
        // before substitution.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters are now part of the URL; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Retry loop: the delegate decides whether (and how long) to retry on
        // token, transport, or HTTP failures.
        loop {
            // Acquire an access token for the configured scopes; the delegate
            // may supply a replacement token when acquisition fails.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure: retry only if the delegate asks for it.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try both known server-error JSON shapes for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The name of the domain association to delete.
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> SiteDomainDeleteCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteDomainDeleteCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteDomainDeleteCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteDomainDeleteCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Gets a domain mapping on the specified site.
///
/// A builder for the *domains.get* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().domains_get("name")
/// .doit().await;
/// # }
/// ```
pub struct SiteDomainGetCall<'a>
    where {
    // Shared hub that owns the HTTP client, authenticator, and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `name` path parameter identifying the domain configuration to fetch.
    _name: String,
    // Optional caller-supplied delegate; a `DefaultDelegate` is used when absent.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; empty means the default scope is inserted at call time.
    _scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteDomainGetCall<'a> {}

impl<'a> SiteDomainGetCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Sends `GET v1beta1/{+name}` with an empty body and returns the raw
    /// response together with the decoded `Domain`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Domain)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a default delegate when the caller did not supply one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.domains.get",
                               http_method: hyper::Method::GET });
        // Collect query parameters: known params plus any user-supplied extras.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject user-supplied parameters that clash with reserved ones.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // No scope configured: this read-only method defaults to FirebaseReadonly.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the URI template; '+'-prefixed variables are percent-encoded
        // before substitution.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters are now part of the URL; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Retry loop: the delegate decides whether (and how long) to retry on
        // token, transport, or HTTP failures.
        loop {
            // Acquire an access token for the configured scopes; the delegate
            // may supply a replacement token when acquisition fails.
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level failure: retry only if the delegate asks for it.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Try both known server-error JSON shapes for the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed result.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The name of the domain configuration to get.
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> SiteDomainGetCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteDomainGetCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteDomainGetCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteDomainGetCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Lists the domains for the specified site.
///
/// A builder for the *domains.list* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().domains_list("parent")
/// .page_token("dolores")
/// .page_size(-62)
/// .doit().await;
/// # }
/// ```
pub struct SiteDomainListCall<'a>
    where {
    // Shared hub that owns the HTTP client, authenticator, and base URL.
    hub: &'a FirebaseHosting<>,
    // Required `parent` path parameter, e.g. "sites/site-name".
    _parent: String,
    // Optional `pageToken` query parameter for resuming a previous listing.
    _page_token: Option<String>,
    // Optional `pageSize` query parameter limiting the number of results.
    _page_size: Option<i32>,
    // Optional caller-supplied delegate; a `DefaultDelegate` is used when absent.
    _delegate: Option<&'a mut dyn client::Delegate>,
    // Extra query parameters added via `param()`.
    _additional_params: HashMap<String, String>,
    // OAuth scopes to request; empty means the default scope is inserted at call time.
    _scopes: BTreeMap<String, ()>
}

// Marker impl: lets this builder participate in the generic call machinery.
impl<'a> client::CallBuilder for SiteDomainListCall<'a> {}
impl<'a> SiteDomainListCall<'a> {
    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListDomainsResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, falling back to a no-op default.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.domains.list",
                   http_method: hyper::Method::GET });
        // Assemble the known path and query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Reject user-supplied additional parameters that clash with reserved names.
        for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        // Expand the URI template; default the scope if the caller set none.
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/domains";
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            // "{+...}" templates are percent-encoded before substitution.
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were substituted into the URL; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Request loop: the delegate decides whether auth or HTTP failures are retried.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Non-2xx: give the delegate a chance to retry, else surface the error.
                    if !res.status().is_success() {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON payload into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The parent for which to list domains, in the format: sites/ site-name
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> SiteDomainListCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The next_page_token from a previous request, if provided.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> SiteDomainListCall<'a> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// The page size to return. Defaults to 50.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> SiteDomainListCall<'a> {
        self._page_size = Some(new_value);
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteDomainListCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteDomainListCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteDomainListCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Updates the specified domain mapping, creating the mapping if it does not already exist.
///
/// A builder for the *domains.update* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Domain;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Domain::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().domains_update(req, "name")
/// .doit().await;
/// # }
/// ```
pub struct SiteDomainUpdateCall<'a> {
    /// Shared hub holding the HTTP client and authenticator for this call.
    hub: &'a FirebaseHosting,
    /// The `Domain` payload sent as the request body.
    _request: Domain,
    /// Required `name` path parameter.
    _name: String,
    /// Delegate consulted while the request executes, if any.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters not covered by a dedicated setter.
    _additional_params: HashMap<String, String>,
    /// OAuth2 scopes to authorize with (only the keys are meaningful).
    _scopes: BTreeMap<String, ()>,
}

impl<'a> client::CallBuilder for SiteDomainUpdateCall<'a> {}
impl<'a> SiteDomainUpdateCall<'a> {
    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Domain)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, falling back to a no-op default.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.domains.update",
                   http_method: hyper::Method::PUT });
        // Assemble the known path and query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject user-supplied additional parameters that clash with reserved names.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        // Expand the URI template; default the scope if the caller set none.
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            // "{+...}" templates are percent-encoded before substitution.
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were substituted into the URL; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Serialize the request body to JSON, stripping explicit null values.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request loop: the delegate decides whether auth or HTTP failures are retried.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind the body reader so a retry resends the complete payload.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PUT).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Non-2xx: give the delegate a chance to retry, else surface the error.
                    if !res.status().is_success() {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON payload into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Domain) -> SiteDomainUpdateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The name of the domain association to update or create, if an association doesn't already exist.
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> SiteDomainUpdateCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteDomainUpdateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteDomainUpdateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteDomainUpdateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Creates a new release, which makes the content of the specified version actively display on the appropriate URL(s).
///
/// A builder for the *releases.create* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Release;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Release::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().releases_create(req, "parent")
/// .version_name("voluptua.")
/// .doit().await;
/// # }
/// ```
pub struct SiteReleaseCreateCall<'a> {
    /// Shared hub holding the HTTP client and authenticator for this call.
    hub: &'a FirebaseHosting,
    /// The `Release` payload sent as the request body.
    _request: Release,
    /// Required `parent` path parameter.
    _parent: String,
    /// Optional `versionName` query parameter.
    _version_name: Option<String>,
    /// Delegate consulted while the request executes, if any.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters not covered by a dedicated setter.
    _additional_params: HashMap<String, String>,
    /// OAuth2 scopes to authorize with (only the keys are meaningful).
    _scopes: BTreeMap<String, ()>,
}

impl<'a> client::CallBuilder for SiteReleaseCreateCall<'a> {}
impl<'a> SiteReleaseCreateCall<'a> {
    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Release)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate, falling back to a no-op default.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.releases.create",
                   http_method: hyper::Method::POST });
        // Assemble the known path and query parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._version_name {
            params.push(("versionName", value.to_string()));
        }
        // Reject user-supplied additional parameters that clash with reserved names.
        for &field in ["alt", "parent", "versionName"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        // Expand the URI template; default the scope if the caller set none.
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/releases";
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            // "{+...}" templates are percent-encoded before substitution.
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were substituted into the URL; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Serialize the request body to JSON, stripping explicit null values.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Request loop: the delegate decides whether auth or HTTP failures are retried.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind the body reader so a retry resends the complete payload.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Non-2xx: give the delegate a chance to retry, else surface the error.
                    if !res.status().is_success() {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON payload into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Release) -> SiteReleaseCreateCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The site or channel to which the release belongs, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> SiteReleaseCreateCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The unique identifier for a version, in the format: sites/SITE_ID/versions/ VERSION_ID The SITE_ID in this version identifier must match the SITE_ID in the `parent` parameter. This query parameter must be empty if the `type` field in the request body is `SITE_DISABLE`.
    ///
    /// Sets the *version name* query property to the given value.
    pub fn version_name(mut self, new_value: &str) -> SiteReleaseCreateCall<'a> {
        self._version_name = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteReleaseCreateCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteReleaseCreateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteReleaseCreateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}
/// Lists the releases that have been created for the specified site or channel. When used to list releases for a site, this list includes releases for both the default `live` channel and any active preview channels for the specified site.
///
/// A builder for the *releases.list* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().releases_list("parent")
/// .page_token("dolore")
/// .page_size(-34)
/// .doit().await;
/// # }
/// ```
pub struct SiteReleaseListCall<'a> {
    /// Shared hub holding the HTTP client and authenticator for this call.
    hub: &'a FirebaseHosting,
    /// Required `parent` path parameter.
    _parent: String,
    /// Optional `pageToken` query parameter.
    _page_token: Option<String>,
    /// Optional `pageSize` query parameter.
    _page_size: Option<i32>,
    /// Delegate consulted while the request executes, if any.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query parameters not covered by a dedicated setter.
    _additional_params: HashMap<String, String>,
    /// OAuth2 scopes to authorize with (only the keys are meaningful).
    _scopes: BTreeMap<String, ()>,
}

impl<'a> client::CallBuilder for SiteReleaseListCall<'a> {}
impl<'a> SiteReleaseListCall<'a> {
/// Perform the operation you have build so far.
// Executes the configured *releases.list* call: expands the `{+parent}`
// URL template, gathers query parameters, obtains an OAuth token for the
// selected scopes, and retries the GET request for as long as the delegate
// asks for it. On success returns the raw hyper response paired with the
// decoded `ListReleasesResponse`.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListReleasesResponse)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
// Use the caller-supplied delegate, or fall back to the no-op default.
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "firebasehosting.sites.releases.list",
                               http_method: hyper::Method::GET });
// Assemble path and query parameters from the builder state.
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("parent", self._parent.to_string()));
if let Some(value) = self._page_token {
params.push(("pageToken", value.to_string()));
}
if let Some(value) = self._page_size {
params.push(("pageSize", value.to_string()));
}
// Additional params must not clash with the reserved/known names.
for &field in ["alt", "parent", "pageToken", "pageSize"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/releases";
// Default to the read-only Firebase scope when none was chosen.
if self._scopes.len() == 0 {
self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
}
// Expand the URL template; `{+name}` (reserved expansion) values are
// percent-encoded with DEFAULT_ENCODE_SET first.
for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
// Path parameters were consumed by the template expansion above; drop
// them so they do not also appear in the query string.
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["parent"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
// Request loop: re-entered whenever the delegate requests a retry.
loop {
// Acquire an access token for the selected scopes; on failure the
// delegate may supply a replacement token.
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
// Transport-level failure: retry only if the delegate says so.
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
// Non-2xx: surface a structured server error when the body
// parses as one, otherwise return the raw response.
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
// Success: decode the JSON body into the typed response tuple.
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Required. The site or channel whose releases should be listed, given as
/// either `sites/SITE_ID` or `sites/SITE_ID/channels/CHANNEL_ID`.
///
/// Replaces the *parent* path property with the given value.
///
/// The property is already populated when the call is instantiated; this
/// setter exists for API completeness.
pub fn parent(mut self, new_value: &str) -> SiteReleaseListCall<'a> {
    self._parent = String::from(new_value);
    self
}
/// Resume token from a previous `releases.list` or `channels.releases.list`
/// response, telling the server where to continue listing.
///
/// Replaces the *page token* query property with the given value.
pub fn page_token(mut self, new_value: &str) -> SiteReleaseListCall<'a> {
    self._page_token = Some(String::from(new_value));
    self
}
/// Upper bound on the number of releases returned; the service may return
/// fewer if fewer exist. Defaults to 100 when unspecified.
///
/// Replaces the *page size* query property with the given value.
pub fn page_size(mut self, new_value: i32) -> SiteReleaseListCall<'a> {
    self._page_size = new_value.into();
    self
}
/// Installs a delegate that is consulted on intermediate results and on
/// failures while the actual API request executes.
///
/// Use it to handle progress information and to add a certain level of
/// resilience (e.g. retries).
///
/// Replaces the *delegate* property with the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteReleaseListCall<'a> {
    let _ = self._delegate.replace(new_value);
    self
}
/// Sets an arbitrary query-string parameter that has no dedicated setter.
///
/// Must not be used for any of the known parameters that do have their own
/// setter method; doing so makes the request fail with a field clash.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteReleaseListCall<'a>
where T: AsRef<str> {
    let (key, val) = (name.as_ref().to_string(), value.as_ref().to_string());
    self._additional_params.insert(key, val);
    self
}
/// Selects an authorization scope for this call, to be used instead of the
/// default `Scope::FirebaseReadonly` variant.
///
/// Scopes accumulate in a set, so access tokens can be maintained for more
/// than one scope. Passing `None` removes nothing but adds no scope either;
/// with an empty scope set no default is applied and an API key must be
/// supplied via the `key` parameter (see `param()` for details).
///
/// Usually more than one scope can authorize an operation, some granting
/// more rights than others. For listing resources a *read-only* scope is
/// sufficient, though a read-write scope works as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteReleaseListCall<'a>
where T: Into<Option<S>>,
      S: AsRef<str> {
    if let Some(s) = scope.into() {
        self._scopes.insert(s.as_ref().to_string(), ());
    }
    self
}
}
/// Lists the remaining files to be uploaded for the specified version.
///
/// A builder for the *versions.files.list* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().versions_files_list("parent")
/// .status("amet.")
/// .page_token("ea")
/// .page_size(-95)
/// .doit().await;
/// # }
/// ```
pub struct SiteVersionFileListCall<'a>
where {
// Shared hub providing the HTTP client, authenticator, and base URL.
hub: &'a FirebaseHosting<>,
// Required `parent` path parameter: the version whose files are listed.
_parent: String,
// Optional `status` query parameter: which type of files to list.
_status: Option<String>,
// Optional `pageToken` query parameter for resuming a listing.
_page_token: Option<String>,
// Optional `pageSize` query parameter; the server may return fewer items.
_page_size: Option<i32>,
// Delegate consulted for progress and error handling during `doit()`.
_delegate: Option<&'a mut dyn client::Delegate>,
// Extra query parameters; checked against known names in `doit()`.
_additional_params: HashMap<String, String>,
// OAuth scopes to request a token for; a default is applied in `doit()`.
_scopes: BTreeMap<String, ()>
}
// Marker impl so this builder can be treated uniformly as a call builder.
impl<'a> client::CallBuilder for SiteVersionFileListCall<'a> {}
impl<'a> SiteVersionFileListCall<'a> {
/// Perform the operation you have built so far.
///
/// Expands the `{+parent}` URL template, gathers query parameters
/// (including the optional `status` filter), authorizes with the selected
/// scopes, and retries the GET request while the delegate requests it.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListVersionFilesResponse)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
// Use the caller-supplied delegate, or fall back to the no-op default.
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "firebasehosting.sites.versions.files.list",
                               http_method: hyper::Method::GET });
// Assemble path and query parameters from the builder state.
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("parent", self._parent.to_string()));
if let Some(value) = self._status {
params.push(("status", value.to_string()));
}
if let Some(value) = self._page_token {
params.push(("pageToken", value.to_string()));
}
if let Some(value) = self._page_size {
params.push(("pageSize", value.to_string()));
}
// Additional params must not clash with the reserved/known names.
for &field in ["alt", "parent", "status", "pageToken", "pageSize"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/files";
// Default to the read-only Firebase scope when none was chosen.
if self._scopes.len() == 0 {
self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
}
// Expand the URL template; `{+name}` (reserved expansion) values are
// percent-encoded with DEFAULT_ENCODE_SET first.
for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
// Path parameters were consumed by the template expansion above; drop
// them so they do not also appear in the query string.
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["parent"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
// Request loop: re-entered whenever the delegate requests a retry.
loop {
// Acquire an access token for the selected scopes; on failure the
// delegate may supply a replacement token.
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
// Transport-level failure: retry only if the delegate says so.
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
// Non-2xx: surface a structured server error when the body
// parses as one, otherwise return the raw response.
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
// Success: decode the JSON body into the typed response tuple.
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Required. The version for which to list files, in the format: sites/SITE_ID /versions/VERSION_ID
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> SiteVersionFileListCall<'a> {
self._parent = new_value.to_string();
self
}
/// The type of files that should be listed for the specified version.
///
/// Sets the *status* query property to the given value.
pub fn status(mut self, new_value: &str) -> SiteVersionFileListCall<'a> {
self._status = Some(new_value.to_string());
self
}
/// A token from a previous call to `ListVersionFiles` that tells the server where to resume listing.
///
/// Sets the *page token* query property to the given value.
pub fn page_token(mut self, new_value: &str) -> SiteVersionFileListCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// The maximum number of version files to return. The service may return a lower number if fewer version files exist than this maximum number. If unspecified, defaults to 1000.
///
/// Sets the *page size* query property to the given value.
pub fn page_size(mut self, new_value: i32) -> SiteVersionFileListCall<'a> {
self._page_size = Some(new_value);
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteVersionFileListCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteVersionFileListCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::FirebaseReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteVersionFileListCall<'a>
where T: Into<Option<S>>,
      S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Creates a new version on the specified target site using the content of the specified version.
///
/// A builder for the *versions.clone* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::CloneVersionRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = CloneVersionRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().versions_clone(req, "parent")
/// .doit().await;
/// # }
/// ```
pub struct SiteVersionCloneCall<'a>
where {
// Shared hub providing the HTTP client, authenticator, and base URL.
hub: &'a FirebaseHosting<>,
// Request body serialized to JSON and POSTed by `doit()`.
_request: CloneVersionRequest,
// Required `parent` path parameter: the target site for the cloned version.
_parent: String,
// Delegate consulted for progress and error handling during `doit()`.
_delegate: Option<&'a mut dyn client::Delegate>,
// Extra query parameters; checked against known names in `doit()`.
_additional_params: HashMap<String, String>,
// OAuth scopes to request a token for; a default is applied in `doit()`.
_scopes: BTreeMap<String, ()>
}
// Marker impl so this builder can be treated uniformly as a call builder.
impl<'a> client::CallBuilder for SiteVersionCloneCall<'a> {}
impl<'a> SiteVersionCloneCall<'a> {
/// Perform the operation you have built so far.
///
/// Serializes the `CloneVersionRequest` to JSON, expands the `{+parent}`
/// URL template, authorizes with the selected scopes, and POSTs the body,
/// retrying while the delegate requests it. Returns a long-running
/// `Operation` on success.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Operation)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
// Use the caller-supplied delegate, or fall back to the no-op default.
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "firebasehosting.sites.versions.clone",
                               http_method: hyper::Method::POST });
// Assemble path and query parameters from the builder state.
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("parent", self._parent.to_string()));
// Additional params must not clash with the reserved/known names.
for &field in ["alt", "parent"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/versions:clone";
// Default to the cloud-platform scope when none was chosen.
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
// Expand the URL template; `{+name}` (reserved expansion) values are
// percent-encoded with DEFAULT_ENCODE_SET first.
for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
// Path parameters were consumed by the template expansion above; drop
// them so they do not also appear in the query string.
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["parent"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
// Serialize the request body, stripping JSON nulls so unset optional
// fields are omitted from the payload.
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
// Determine the body length by seeking to the end, then rewind.
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
// Request loop: re-entered whenever the delegate requests a retry.
loop {
// Acquire an access token for the selected scopes; on failure the
// delegate may supply a replacement token.
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
// Rewind the body reader so a retry re-sends the full payload.
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
// Transport-level failure: retry only if the delegate says so.
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
// Non-2xx: surface a structured server error when the body
// parses as one, otherwise return the raw response.
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
// Success: decode the JSON body into the typed response tuple.
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: CloneVersionRequest) -> SiteVersionCloneCall<'a> {
self._request = new_value;
self
}
/// Required. The target site for the cloned version, in the format: sites/ SITE_ID
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> SiteVersionCloneCall<'a> {
self._parent = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteVersionCloneCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteVersionCloneCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteVersionCloneCall<'a>
where T: Into<Option<S>>,
      S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Creates a new version for the specified site.
///
/// A builder for the *versions.create* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Version;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Version::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().versions_create(req, "parent")
/// .version_id("no")
/// .size_bytes("est")
/// .doit().await;
/// # }
/// ```
pub struct SiteVersionCreateCall<'a>
where {
// Shared hub providing the HTTP client, authenticator, and base URL.
hub: &'a FirebaseHosting<>,
// Request body serialized to JSON and POSTed by `doit()`.
_request: Version,
// Required `parent` path parameter: the site in which to create the version.
_parent: String,
// Optional `versionId` query parameter.
_version_id: Option<String>,
// Optional `sizeBytes` query parameter.
_size_bytes: Option<String>,
// Delegate consulted for progress and error handling during `doit()`.
_delegate: Option<&'a mut dyn client::Delegate>,
// Extra query parameters; checked against known names in `doit()`.
_additional_params: HashMap<String, String>,
// OAuth scopes to request a token for; a default is applied in `doit()`.
_scopes: BTreeMap<String, ()>
}
// Marker impl so this builder can be treated uniformly as a call builder.
impl<'a> client::CallBuilder for SiteVersionCreateCall<'a> {}
impl<'a> SiteVersionCreateCall<'a> {
/// Perform the operation you have build so far.
// Executes the configured *versions.create* call: serializes the `Version`
// request body to JSON, expands the `{+parent}` URL template, authorizes
// with the selected scopes, and POSTs the body, retrying while the delegate
// requests it. Returns the created `Version` on success.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Version)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
// Use the caller-supplied delegate, or fall back to the no-op default.
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "firebasehosting.sites.versions.create",
                               http_method: hyper::Method::POST });
// Assemble path and query parameters from the builder state.
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("parent", self._parent.to_string()));
if let Some(value) = self._version_id {
params.push(("versionId", value.to_string()));
}
if let Some(value) = self._size_bytes {
params.push(("sizeBytes", value.to_string()));
}
// Additional params must not clash with the reserved/known names.
for &field in ["alt", "parent", "versionId", "sizeBytes"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/versions";
// Default to the cloud-platform scope when none was chosen.
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
// Expand the URL template; `{+name}` (reserved expansion) values are
// percent-encoded with DEFAULT_ENCODE_SET first.
for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
// Path parameters were consumed by the template expansion above; drop
// them so they do not also appear in the query string.
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["parent"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
// Serialize the request body, stripping JSON nulls so unset optional
// fields are omitted from the payload.
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
// Determine the body length by seeking to the end, then rewind.
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
// Request loop: re-entered whenever the delegate requests a retry.
loop {
// Acquire an access token for the selected scopes; on failure the
// delegate may supply a replacement token.
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
// Rewind the body reader so a retry re-sends the full payload.
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
// Transport-level failure: retry only if the delegate says so.
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
// Non-2xx: surface a structured server error when the body
// parses as one, otherwise return the raw response.
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
// Success: decode the JSON body into the typed response tuple.
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Sets the *request* property (the `Version` payload sent as the JSON request body) to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: Version) -> SiteVersionCreateCall<'a> {
self._request = new_value;
self
}
/// Required. The site in which to create the version, in the format: sites/SITE_ID
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// this setter is provided for API completeness.
pub fn parent(mut self, new_value: &str) -> SiteVersionCreateCall<'a> {
    self._parent = new_value.to_owned();
    self
}
/// A unique id for the new version. This was only specified for legacy version creations, and should be blank.
///
/// Sets the *version id* query property to the given value.
pub fn version_id(mut self, new_value: &str) -> SiteVersionCreateCall<'a> {
    self._version_id = Some(new_value.to_owned());
    self
}
/// The self-reported size of the version. This value is used for a pre-emptive quota check for legacy version uploads.
///
/// Sets the *size bytes* query property to the given value.
pub fn size_bytes(mut self, new_value: &str) -> SiteVersionCreateCall<'a> {
    self._size_bytes = Some(new_value.to_owned());
    self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value. The delegate is consulted
/// by `doit()` for retry and error-handling decisions; when unset, a default
/// delegate is used instead.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteVersionCreateCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteVersionCreateCall<'a>
where T: AsRef<str> {
    self._additional_params.insert(name.as_ref().to_owned(), value.as_ref().to_owned());
    self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteVersionCreateCall<'a>
where T: Into<Option<S>>,
      S: AsRef<str> {
    // Only a concrete scope is recorded; `None` leaves the set untouched here
    // (the default scope is applied later in `doit()` when the set is empty).
    if let Some(scope) = scope.into() {
        self._scopes.insert(scope.as_ref().to_string(), ());
    }
    self
}
}
/// Deletes the specified version.
///
/// A builder for the *versions.delete* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().versions_delete("name")
///              .doit().await;
/// # }
/// ```
pub struct SiteVersionDeleteCall<'a>
where {
// Hub providing the HTTP client, authenticator, and base URL.
hub: &'a FirebaseHosting<>,
// Required `name` path parameter.
_name: String,
// Optional delegate consulted during request execution.
_delegate: Option<&'a mut dyn client::Delegate>,
// Free-form additional query parameters set via `param()`.
_additional_params: HashMap<String, String>,
// OAuth scopes to request a token for; a default is applied in `doit()` when empty.
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteVersionDeleteCall<'a> {}
impl<'a> SiteVersionDeleteCall<'a> {
/// Perform the operation you have built so far.
///
/// Sends a `DELETE` request to `v1beta1/{+name}` and decodes the JSON
/// response into `Empty`. The delegate (if any) is consulted on auth and
/// HTTP failures and may request retries.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
// Use the caller-supplied delegate if present, otherwise a default one.
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "firebasehosting.sites.versions.delete",
http_method: hyper::Method::DELETE });
// Assemble parameters; additional params must not clash with known fields.
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
params.push(("name", self._name.to_string()));
for &field in ["alt", "name"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
// Fall back to the default scope when the caller specified none.
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
// Expand the `{+name}` URI template; `+` (reserved) expansions are percent-encoded.
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
// Path parameters were consumed by the template above; drop them from the query list.
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
// Request loop: the delegate may ask for retries on token or HTTP failures.
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
// Transport-level error: retry if the delegate says so, otherwise give up.
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
// Non-2xx status: try to decode a structured server error for the delegate.
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
// Success: decode the JSON body into the typed result.
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Required. The fully-qualified resource name for the version, in the format: sites/SITE_ID/versions/VERSION_ID
///
/// Sets the *name* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> SiteVersionDeleteCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteVersionDeleteCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteVersionDeleteCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteVersionDeleteCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Lists the versions that have been created for the specified site. This list includes versions for both the default `live` channel and any active preview channels for the specified site.
///
/// A builder for the *versions.list* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().versions_list("parent")
///              .page_token("sit")
///              .page_size(-35)
///              .filter("tempor")
///              .doit().await;
/// # }
/// ```
pub struct SiteVersionListCall<'a>
where {
// Hub providing the HTTP client, authenticator, and base URL.
hub: &'a FirebaseHosting<>,
// Required `parent` path parameter.
_parent: String,
// Optional `pageToken` query parameter.
_page_token: Option<String>,
// Optional `pageSize` query parameter.
_page_size: Option<i32>,
// Optional `filter` query parameter.
_filter: Option<String>,
// Optional delegate consulted during request execution.
_delegate: Option<&'a mut dyn client::Delegate>,
// Free-form additional query parameters set via `param()`.
_additional_params: HashMap<String, String>,
// OAuth scopes to request a token for; a default is applied in `doit()` when empty.
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteVersionListCall<'a> {}
impl<'a> SiteVersionListCall<'a> {
/// Perform the operation you have built so far.
///
/// Sends a `GET` request to `v1beta1/{+parent}/versions` and decodes the
/// JSON response into `ListVersionsResponse`. The delegate (if any) is
/// consulted on auth and HTTP failures and may request retries.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListVersionsResponse)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
// Use the caller-supplied delegate if present, otherwise a default one.
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "firebasehosting.sites.versions.list",
http_method: hyper::Method::GET });
// Assemble parameters; additional params must not clash with known fields.
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("parent", self._parent.to_string()));
if let Some(value) = self._page_token {
params.push(("pageToken", value.to_string()));
}
if let Some(value) = self._page_size {
params.push(("pageSize", value.to_string()));
}
if let Some(value) = self._filter {
params.push(("filter", value.to_string()));
}
for &field in ["alt", "parent", "pageToken", "pageSize", "filter"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}/versions";
// Fall back to the default (read-only) scope when the caller specified none.
if self._scopes.len() == 0 {
self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
}
// Expand the `{+parent}` URI template; `+` (reserved) expansions are percent-encoded.
for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
// Path parameters were consumed by the template above; drop them from the query list.
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["parent"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
// Request loop: the delegate may ask for retries on token or HTTP failures.
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
// Transport-level error: retry if the delegate says so, otherwise give up.
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
// Non-2xx status: try to decode a structured server error for the delegate.
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
// Success: decode the JSON body into the typed result.
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Required. The site or channel for which to list versions, in either of the following formats: - sites/SITE_ID - sites/SITE_ID/channels/CHANNEL_ID
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> SiteVersionListCall<'a> {
self._parent = new_value.to_string();
self
}
/// A token from a previous call to `ListVersions` that tells the server where to resume listing.
///
/// Sets the *page token* query property to the given value.
pub fn page_token(mut self, new_value: &str) -> SiteVersionListCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// The maximum number of versions to return. The service may return a lower number if fewer versions exist than this maximum number. If unspecified, defaults to 25. The maximum value is 100; values above 100 will be coerced to 100.
///
/// Sets the *page size* query property to the given value.
pub fn page_size(mut self, new_value: i32) -> SiteVersionListCall<'a> {
self._page_size = Some(new_value);
self
}
/// A filter string used to return a subset of versions in the response. The currently supported fields for filtering are: `name`, `status`, and `create_time`. Learn more about filtering in Google's [AIP 160 standard](https://google.aip.dev/160).
///
/// Sets the *filter* query property to the given value.
pub fn filter(mut self, new_value: &str) -> SiteVersionListCall<'a> {
self._filter = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteVersionListCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteVersionListCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::FirebaseReadonly`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteVersionListCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Updates the specified metadata for the specified version. This method will fail with `FAILED_PRECONDITION` in the event of an invalid state transition. The supported [state](../sites.versions#versionstatus) transitions for a version are from `CREATED` to `FINALIZED`. Use [`DeleteVersion`](delete) to set the status of a version to `DELETED`.
///
/// A builder for the *versions.patch* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::Version;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Version::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().versions_patch(req, "name")
///              .update_mask("ipsum")
///              .doit().await;
/// # }
/// ```
pub struct SiteVersionPatchCall<'a>
where {
// Hub providing the HTTP client, authenticator, and base URL.
hub: &'a FirebaseHosting<>,
// The `Version` payload sent as the JSON request body.
_request: Version,
// Required `name` path parameter.
_name: String,
// Optional `updateMask` query parameter.
_update_mask: Option<String>,
// Optional delegate consulted during request execution.
_delegate: Option<&'a mut dyn client::Delegate>,
// Free-form additional query parameters set via `param()`.
_additional_params: HashMap<String, String>,
// OAuth scopes to request a token for; a default is applied in `doit()` when empty.
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for SiteVersionPatchCall<'a> {}
impl<'a> SiteVersionPatchCall<'a> {
/// Perform the operation you have built so far.
///
/// Serializes the `Version` request body, sends a `PATCH` request to
/// `v1beta1/{+name}`, and decodes the JSON response into `Version`. The
/// delegate (if any) is consulted on auth and HTTP failures and may
/// request retries.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Version)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
// Use the caller-supplied delegate if present, otherwise a default one.
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "firebasehosting.sites.versions.patch",
http_method: hyper::Method::PATCH });
// Assemble parameters; additional params must not clash with known fields.
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("name", self._name.to_string()));
if let Some(value) = self._update_mask {
params.push(("updateMask", value.to_string()));
}
for &field in ["alt", "name", "updateMask"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
// Fall back to the default scope when the caller specified none.
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
// Expand the `{+name}` URI template; `+` (reserved) expansions are percent-encoded.
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
// Path parameters were consumed by the template above; drop them from the query list.
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
// Serialize the request body once (nulls stripped); it is re-read on every retry.
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
// Seek to the end to learn the body size, then rewind for sending.
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
// Request loop: the delegate may ask for retries on token or HTTP failures.
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
// Rewind the body reader so retries resend the full payload.
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
// Transport-level error: retry if the delegate says so, otherwise give up.
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
// Non-2xx status: try to decode a structured server error for the delegate.
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
// Success: decode the JSON body into the typed result.
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: Version) -> SiteVersionPatchCall<'a> {
self._request = new_value;
self
}
/// The fully-qualified resource name for the version, in the format: sites/ SITE_ID/versions/VERSION_ID This name is provided in the response body when you call [`CreateVersion`](sites.versions/create).
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> SiteVersionPatchCall<'a> {
self._name = new_value.to_string();
self
}
/// A set of field names from your [version](../sites.versions) that you want to update. A field will be overwritten if, and only if, it's in the mask. If a mask is not provided then a default mask of only [`status`](../sites.versions#Version.FIELDS.status) will be used.
///
/// Sets the *update mask* query property to the given value.
pub fn update_mask(mut self, new_value: &str) -> SiteVersionPatchCall<'a> {
self._update_mask = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteVersionPatchCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteVersionPatchCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteVersionPatchCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Adds content files to the specified version. Each file must be under 2 GB.
///
/// A builder for the *versions.populateFiles* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::PopulateVersionFilesRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = PopulateVersionFilesRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().versions_populate_files(req, "parent")
///              .doit().await;
/// # }
/// ```
pub struct SiteVersionPopulateFileCall<'a>
    where {
    /// Shared hub providing the HTTP client, authenticator, user agent and base URL.
    hub: &'a FirebaseHosting<>,
    /// JSON request body sent to the populateFiles endpoint.
    _request: PopulateVersionFilesRequest,
    /// Required `parent` path parameter (the version to add files to).
    _parent: String,
    /// Optional caller-supplied delegate consulted for retries/progress; a default is used in `doit()` when absent.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query-string parameters not covered by dedicated setters.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request a token for; `doit()` inserts a default when empty.
    _scopes: BTreeMap<String, ()>
}
// Empty marker-trait impl: lets this builder be handled generically as a `client::CallBuilder`.
impl<'a> client::CallBuilder for SiteVersionPopulateFileCall<'a> {}
impl<'a> SiteVersionPopulateFileCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Assembles query/path parameters, serializes the request body once, then issues an
    /// authorized POST to `v1beta1/{+parent}:populateFiles`, retrying when the delegate asks to.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, PopulateVersionFilesResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if set, otherwise a stack-local default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.versions.populateFiles",
                               http_method: hyper::Method::POST });
        // Collect parameters; reserved names must not also appear in the additional params.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        for &field in ["alt", "parent"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+parent}:populateFiles";
        // Default scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+parent}` URI template; `+` segments are percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by template expansion; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Serialize the request body once; the cursor is rewound and reused on every retry.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Retry loop: token errors, transport errors and HTTP failures may each retry via the delegate.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: delegate decides whether to retry after a pause.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: parse the server's error payloads and consult the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: PopulateVersionFilesRequest) -> SiteVersionPopulateFileCall<'a> {
        self._request = new_value;
        self
    }
    /// Required. The version to which to add files, in the format: sites/SITE_ID /versions/VERSION_ID
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> SiteVersionPopulateFileCall<'a> {
        self._parent = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteVersionPopulateFileCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteVersionPopulateFileCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteVersionPopulateFileCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Gets the Hosting metadata for a specific site.
///
/// A builder for the *getConfig* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().get_config("name")
///              .doit().await;
/// # }
/// ```
pub struct SiteGetConfigCall<'a>
    where {
    /// Shared hub providing the HTTP client, authenticator, user agent and base URL.
    hub: &'a FirebaseHosting<>,
    /// Required `name` path parameter (the SiteConfig to fetch).
    _name: String,
    /// Optional caller-supplied delegate consulted for retries/progress; a default is used in `doit()` when absent.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query-string parameters not covered by dedicated setters.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request a token for; `doit()` inserts a read-only default when empty.
    _scopes: BTreeMap<String, ()>
}
// Empty marker-trait impl: lets this builder be handled generically as a `client::CallBuilder`.
impl<'a> client::CallBuilder for SiteGetConfigCall<'a> {}
impl<'a> SiteGetConfigCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Issues an authorized GET to `v1beta1/{+name}` (no request body), retrying
    /// when the delegate asks to, and decodes the response into a `SiteConfig`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, SiteConfig)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if set, otherwise a stack-local default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.getConfig",
                               http_method: hyper::Method::GET });
        // Collect parameters; reserved names must not also appear in the additional params.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Read-only default scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::FirebaseReadonly.as_ref().to_string(), ());
        }
        // Expand the `{+name}` URI template; `+` segments are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by template expansion; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Retry loop: token errors, transport errors and HTTP failures may each retry via the delegate.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .body(hyper::body::Body::empty());
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: delegate decides whether to retry after a pause.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: parse the server's error payloads and consult the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
    /// Required. The site for which to get the SiteConfig, in the format: sites/ site-name/config
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> SiteGetConfigCall<'a> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteGetConfigCall<'a> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SiteGetConfigCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::FirebaseReadonly`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SiteGetConfigCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
/// Sets the Hosting metadata for a specific site.
///
/// A builder for the *updateConfig* method supported by a *site* resource.
/// It is not used directly, but through a `SiteMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_firebasehosting1_beta1 as firebasehosting1_beta1;
/// use firebasehosting1_beta1::api::SiteConfig;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use firebasehosting1_beta1::FirebaseHosting;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = FirebaseHosting::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = SiteConfig::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.sites().update_config(req, "name")
///              .update_mask("est")
///              .doit().await;
/// # }
/// ```
pub struct SiteUpdateConfigCall<'a>
    where {
    /// Shared hub providing the HTTP client, authenticator, user agent and base URL.
    hub: &'a FirebaseHosting<>,
    /// JSON request body (the new SiteConfig) sent with the PATCH request.
    _request: SiteConfig,
    /// Required `name` path parameter (the SiteConfig to update).
    _name: String,
    /// Optional `updateMask` query parameter selecting which fields to overwrite.
    _update_mask: Option<String>,
    /// Optional caller-supplied delegate consulted for retries/progress; a default is used in `doit()` when absent.
    _delegate: Option<&'a mut dyn client::Delegate>,
    /// Extra query-string parameters not covered by dedicated setters.
    _additional_params: HashMap<String, String>,
    /// OAuth scopes to request a token for; `doit()` inserts a default when empty.
    _scopes: BTreeMap<String, ()>
}
// Empty marker-trait impl: lets this builder be handled generically as a `client::CallBuilder`.
impl<'a> client::CallBuilder for SiteUpdateConfigCall<'a> {}
impl<'a> SiteUpdateConfigCall<'a> {
    /// Perform the operation you have built so far.
    ///
    /// Assembles query/path parameters (including the optional `updateMask`),
    /// serializes the request body once, then issues an authorized PATCH to
    /// `v1beta1/{+name}`, retrying when the delegate asks to.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, SiteConfig)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if set, otherwise a stack-local default one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "firebasehosting.sites.updateConfig",
                               http_method: hyper::Method::PATCH });
        // Collect parameters; reserved names must not also appear in the additional params.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        if let Some(value) = self._update_mask {
            params.push(("updateMask", value.to_string()));
        }
        for &field in ["alt", "name", "updateMask"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));
        let mut url = self.hub._base_url.clone() + "v1beta1/{+name}";
        // Default scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }
        // Expand the `{+name}` URI template; `+` segments are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by template expansion; drop them from the query list.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }
        let url = url::Url::parse_with_params(&url, params).unwrap();
        // Serialize the request body once; the cursor is rewound and reused on every retry.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        // Retry loop: token errors, transport errors and HTTP failures may each retry via the delegate.
        loop {
            let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
                client.request(request.unwrap()).await
            };
            match req_result {
                Err(err) => {
                    // Transport-level error: delegate decides whether to retry after a pause.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        // Non-2xx: parse the server's error payloads and consult the delegate.
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();
                        if let client::Retry::After(d) = dlg.http_failure(&res,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON body into the typed response.
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };
                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }
///
/// Sets the *request* property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: SiteConfig) -> SiteUpdateConfigCall<'a> {
self._request = new_value;
self
}
/// Required. The site for which to update the SiteConfig, in the format: sites/ site-name/config
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> SiteUpdateConfigCall<'a> {
self._name = new_value.to_string();
self
}
/// A set of field names from your [site configuration](../sites.SiteConfig) that you want to update. A field will be overwritten if, and only if, it's in the mask. If a mask is not provided then a default mask of only [`max_versions`](../sites.SiteConfig.max_versions) will be used.
///
/// Sets the *update mask* query property to the given value.
pub fn update_mask(mut self, new_value: &str) -> SiteUpdateConfigCall<'a> {
self._update_mask = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SiteUpdateConfigCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> SiteUpdateConfigCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> SiteUpdateConfigCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
| versions_create |
options.ts | import AdminBro, { AdminBroOptions } from 'admin-bro'
import * as UserAdmin from './resources/user' | export const options: AdminBroOptions = {
rootPath,
version: {
admin: true,
},
dashboard: {
handler: async () => {
return { some: 'output' }
},
component: AdminBro.bundle('../../../src/admin/components/dashboard')
},
resources: [
UserAdmin,
MerchantAdmin
],
} | import * as MerchantAdmin from './resources/merchant'
const rootPath = '/admin'
|
token_test.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"sync/atomic"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/clientcmd"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
)
const (
tokenExpectedRegex = "^\\S{6}\\.\\S{16}\n$"
testConfigToken = `apiVersion: v1
clusters:
- cluster:
certificate-authority-data:
server: localhost:8000
name: prod
contexts:
- context:
cluster: prod
namespace: default
user: default-service-account
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data:
client-key-data:
`
testConfigTokenCertAuthorityData = "certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFM01USXhOREUxTlRFek1Gb1hEVEkzTVRJeE1qRTFOVEV6TUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTlZrCnNkT0NjRDBIOG9ycXZ5djBEZ09jZEpjRGc4aTJPNGt3QVpPOWZUanJGRHJqbDZlVXRtdlMyZ1lZd0c4TGhPV2gKb0lkZ3AvbVkrbVlDakliUUJtTmE2Ums1V2JremhJRzM1c1lseE9NVUJJR0xXMzN0RTh4SlR1RVd3V0NmZnpLcQpyaU1UT1A3REF3MUxuM2xUNlpJNGRNM09NOE1IUk9Wd3lRMDVpbWo5eUx5R1lYdTlvSncwdTVXWVpFYmpUL3VpCjJBZ2QwVDMrZGFFb044aVBJOTlVQkQxMzRkc2VGSEJEY3hHcmsvVGlQdHBpSC9IOGoxRWZaYzRzTGlONzJmL2YKYUpacTROSHFiT2F5UkpITCtJejFNTW1DRkN3cjdHOHVENWVvWWp2dEdZN2xLc1pBTlUwK3VlUnJsTitxTzhQWQpxaTZNMDFBcmV1UzFVVHFuTkM4Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFNbXo4Nm9LMmFLa0owMnlLSC9ZcTlzaDZZcDEKYmhLS25mMFJCaTA1clRacWdhTi9oTnROdmQxSzJxZGRLNzhIT2pVdkpNRGp3NERieXF0Wll2V01XVFRCQnQrSgpPMGNyWkg5NXlqUW42YzRlcU1FTjFhOUFKNXRlclNnTDVhREJsK0FMTWxaNVpxTzBUOUJDdTJtNXV3dGNWaFZuCnh6cGpTT3V5WVdOQ3A5bW9mV2VPUTljNXhEcElWeUlMUkFvNmZ5Z2c3N25TSDN4ckVmd0VKUHFMd1RPYVk1bTcKeEZWWWJoR3dxUGU5V0I5aTR5cnNrZUFBWlpUSzdVbklKMXFkRmlHQk9aZlRtaDhYQ3BOTHZZcFBLQW9hWWlsRwpjOW1acVhpWVlESTV6R1IxMElpc2FWNXJUY2hDenNQVWRhQzRVbnpTZG01cTdKYTAyb0poQlU1TE1FMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
testConfigTokenNoCluster = `apiVersion: v1
clusters:
- cluster:
server:
name: prod
contexts:
- context:
namespace: default
user: default-service-account
name: default
kind: Config
preferences: {}
`
)
func TestRunGenerateToken(t *testing.T) {
var buf bytes.Buffer
err := RunGenerateToken(&buf)
if err != nil {
t.Errorf("RunGenerateToken returned an error: %v", err)
}
output := buf.String()
matched, err := regexp.MatchString(tokenExpectedRegex, output)
if err != nil {
t.Fatalf("Encountered an error while trying to match RunGenerateToken's output: %v", err)
}
if !matched {
t.Errorf("RunGenerateToken's output did not match expected regex; wanted: [%s], got: [%s]", tokenExpectedRegex, output)
}
}
func TestRunCreateToken(t *testing.T) {
var buf bytes.Buffer
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.NewNotFound(v1.Resource("secrets"), "foo")
})
testCases := []struct {
name string
token string
usages []string
extraGroups []string
printJoin bool
expectedError bool
}{
{
name: "valid: empty token",
token: "",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:foo"},
expectedError: false,
},
{
name: "valid: non-empty token",
token: "abcdef.1234567890123456",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:foo"},
expectedError: false,
},
{
name: "valid: no extraGroups",
token: "abcdef.1234567890123456",
usages: []string{"signing", "authentication"},
extraGroups: []string{},
expectedError: false,
},
{
name: "invalid: incorrect extraGroups",
token: "abcdef.1234567890123456",
usages: []string{"signing", "authentication"},
extraGroups: []string{"foo"},
expectedError: true,
},
{
name: "invalid: specifying --groups when --usages doesn't include authentication",
token: "abcdef.1234567890123456",
usages: []string{"signing"},
extraGroups: []string{"foo"},
expectedError: true,
},
{
name: "invalid: partially incorrect usages",
token: "abcdef.1234567890123456",
usages: []string{"foo", "authentication"},
extraGroups: []string{"system:bootstrappers:foo"},
expectedError: true,
},
{
name: "invalid: all incorrect usages",
token: "abcdef.1234567890123456",
usages: []string{"foo", "bar"},
extraGroups: []string{"system:bootstrappers:foo"},
expectedError: true,
},
{
name: "invalid: print join command",
token: "",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:foo"},
printJoin: true,
expectedError: true,
},
}
for _, tc := range testCases {
bts, err := kubeadmapiv1beta1.NewBootstrapTokenString(tc.token)
if err != nil && len(tc.token) != 0 { // if tc.token is "" it's okay as it will be generated later at runtime
t.Fatalf("token couldn't be parsed for testing: %v", err)
}
cfg := &kubeadmapiv1beta1.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{
// KubernetesVersion is not used, but we set this explicitly to avoid
// the lookup of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig
KubernetesVersion: "v1.11.0",
},
BootstrapTokens: []kubeadmapiv1beta1.BootstrapToken{
{
Token: bts,
TTL: &metav1.Duration{Duration: 0},
Usages: tc.usages,
Groups: tc.extraGroups,
},
},
}
err = RunCreateToken(&buf, fakeClient, "", cfg, tc.printJoin, "")
if (err != nil) != tc.expectedError {
t.Errorf("Test case %s: RunCreateToken expected error: %v, saw: %v", tc.name, tc.expectedError, (err != nil))
}
}
}
func TestNewCmdTokenGenerate(t *testing.T) {
var buf bytes.Buffer
args := []string{}
cmd := NewCmdTokenGenerate(&buf)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("Cannot execute token command: %v", err)
}
}
func TestNewCmdToken(t *testing.T) {
var buf, bufErr bytes.Buffer
testConfigTokenFile := "test-config-file"
tmpDir, err := ioutil.TempDir("", "kubeadm-token-test")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tmpDir)
fullPath := filepath.Join(tmpDir, testConfigTokenFile)
f, err := os.Create(fullPath)
if err != nil {
t.Errorf("Unable to create test file %q: %v", fullPath, err)
}
defer f.Close()
testCases := []struct {
name string
args []string
configToWrite string
kubeConfigEnv string
expectedError bool
}{
{
name: "valid: generate",
args: []string{"generate"},
configToWrite: "",
expectedError: false,
},
{
name: "valid: delete from --kubeconfig",
args: []string{"delete", "abcdef.1234567890123456", "--dry-run", "--kubeconfig=" + fullPath},
configToWrite: testConfigToken,
expectedError: false,
},
{
name: "valid: delete from " + clientcmd.RecommendedConfigPathEnvVar,
args: []string{"delete", "abcdef.1234567890123456", "--dry-run"},
configToWrite: testConfigToken,
kubeConfigEnv: fullPath,
expectedError: false,
},
}
for _, tc := range testCases {
// the command is created for each test so that the kubeConfigFile
// variable in NewCmdToken() is reset.
cmd := NewCmdToken(&buf, &bufErr)
if _, err = f.WriteString(tc.configToWrite); err != nil {
t.Errorf("Unable to write test file %q: %v", fullPath, err)
}
// store the current value of the environment variable.
storedEnv := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
if tc.kubeConfigEnv != "" {
os.Setenv(clientcmd.RecommendedConfigPathEnvVar, tc.kubeConfigEnv)
}
cmd.SetArgs(tc.args)
err := cmd.Execute()
if (err != nil) != tc.expectedError {
t.Errorf("Test case %q: NewCmdToken expected error: %v, saw: %v", tc.name, tc.expectedError, (err != nil))
}
// restore the environment variable.
os.Setenv(clientcmd.RecommendedConfigPathEnvVar, storedEnv)
}
}
func TestGetClientset(t *testing.T) {
testConfigTokenFile := "test-config-file"
tmpDir, err := ioutil.TempDir("", "kubeadm-token-test")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tmpDir)
fullPath := filepath.Join(tmpDir, testConfigTokenFile)
// test dryRun = false on a non-exisiting file
if _, err = getClientset(fullPath, false); err == nil {
t.Errorf("getClientset(); dry-run: false; did no fail for test file %q: %v", fullPath, err)
}
// test dryRun = true on a non-exisiting file
if _, err = getClientset(fullPath, true); err == nil {
t.Errorf("getClientset(); dry-run: true; did no fail for test file %q: %v", fullPath, err)
}
f, err := os.Create(fullPath)
if err != nil {
t.Errorf("Unable to create test file %q: %v", fullPath, err)
}
defer f.Close()
if _, err = f.WriteString(testConfigToken); err != nil {
t.Errorf("Unable to write test file %q: %v", fullPath, err)
}
// test dryRun = true on an exisiting file
if _, err = getClientset(fullPath, true); err != nil {
t.Errorf("getClientset(); dry-run: true; failed for test file %q: %v", fullPath, err)
}
}
func | (t *testing.T) {
var buf bytes.Buffer
tmpDir, err := ioutil.TempDir("", "kubeadm-token-test")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tmpDir)
fullPath := filepath.Join(tmpDir, "test-config-file")
f, err := os.Create(fullPath)
if err != nil {
t.Errorf("Unable to create test file %q: %v", fullPath, err)
}
defer f.Close()
if _, err = f.WriteString(testConfigToken); err != nil {
t.Errorf("Unable to write test file %q: %v", fullPath, err)
}
client, err := getClientset(fullPath, true)
if err != nil {
t.Errorf("Unable to run getClientset() for test file %q: %v", fullPath, err)
}
// test valid; should not fail
// for some reason Secrets().Delete() does not fail even for this dummy config
if err = RunDeleteToken(&buf, client, "abcdef.1234567890123456"); err != nil {
t.Errorf("RunDeleteToken() failed for a valid token: %v", err)
}
// test invalid token; should fail
if err = RunDeleteToken(&buf, client, "invalid-token"); err == nil {
t.Errorf("RunDeleteToken() succeeded for an invalid token: %v", err)
}
}
var httpTestItr uint32
var httpSentResponse uint32 = 1
func TestRunListTokens(t *testing.T) {
var err error
var bufOut, bufErr bytes.Buffer
tmpDir, err := ioutil.TempDir("", "kubeadm-token-test")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tmpDir)
fullPath := filepath.Join(tmpDir, "test-config-file")
f, err := os.Create(fullPath)
if err != nil {
t.Errorf("Unable to create test file %q: %v", fullPath, err)
}
defer f.Close()
// test config without secrets; should fail
if _, err = f.WriteString(testConfigToken); err != nil {
t.Errorf("Unable to write test file %q: %v", fullPath, err)
}
client, err := getClientset(fullPath, true)
if err != nil {
t.Errorf("Unable to run getClientset() for test file %q: %v", fullPath, err)
}
if err = RunListTokens(&bufOut, &bufErr, client); err == nil {
t.Errorf("RunListTokens() did not fail for a config without secrets: %v", err)
}
// test config without secrets but use a dummy API server that returns secrets
portString := "9008"
http.HandleFunc("/", httpHandler)
httpServer := &http.Server{Addr: "localhost:" + portString}
go func() {
err := httpServer.ListenAndServe()
if err != nil {
t.Errorf("Failed to start dummy API server: localhost:%s", portString)
}
}()
fmt.Printf("dummy API server listening on localhost:%s\n", portString)
testConfigTokenOpenPort := strings.Replace(testConfigToken, "server: localhost:8000", "server: localhost:"+portString, -1)
if _, err = f.WriteString(testConfigTokenOpenPort); err != nil {
t.Errorf("Unable to write test file %q: %v", fullPath, err)
}
client, err = getClientset(fullPath, true)
if err != nil {
t.Errorf("Unable to run getClientset() for test file %q: %v", fullPath, err)
}
// the order of these tests should match the case check
// for httpTestItr in httpHandler
testCases := []struct {
name string
expectedError bool
}{
{
name: "token-id not defined",
expectedError: true,
},
{
name: "secret name not formatted correctly",
expectedError: true,
},
{
name: "token-secret not defined",
expectedError: true,
},
{
name: "token expiration not formatted correctly",
expectedError: true,
},
{
name: "token expiration formatted correctly",
expectedError: false,
},
{
name: "token usage constant not true",
expectedError: false,
},
{
name: "token usage constant set to true",
expectedError: false,
},
}
for _, tc := range testCases {
bufErr.Reset()
atomic.StoreUint32(&httpSentResponse, 0)
fmt.Printf("Running HTTP test case (%d) %q\n", atomic.LoadUint32(&httpTestItr), tc.name)
// should always return nil here if a valid list of secrets if fetched
err := RunListTokens(&bufOut, &bufErr, client)
if err != nil {
t.Errorf("HTTP test case %d: Was unable to fetch a list of secrets", atomic.LoadUint32(&httpTestItr))
}
// wait for a response from the dummy HTTP server
timeSpent := 0 * time.Millisecond
timeToSleep := 50 * time.Millisecond
timeMax := 2000 * time.Millisecond
for {
if atomic.LoadUint32(&httpSentResponse) == 1 {
break
}
if timeSpent >= timeMax {
t.Errorf("HTTP test case %d: The server did not respond within %d ms", atomic.LoadUint32(&httpTestItr), timeMax)
}
timeSpent += timeToSleep
time.Sleep(timeToSleep)
}
// check if an error is written in the error buffer
hasError := bufErr.Len() != 0
if hasError != tc.expectedError {
t.Errorf("HTTP test case %d: RunListTokens expected error: %v, saw: %v; %v", atomic.LoadUint32(&httpTestItr), tc.expectedError, hasError, bufErr.String())
}
}
}
// only one of these should run at a time in a goroutine
func httpHandler(w http.ResponseWriter, r *http.Request) {
tokenID := []byte("07401b")
tokenSecret := []byte("f395accd246ae52d")
tokenExpire := []byte("2012-11-01T22:08:41+00:00")
badValue := "bad-value"
name := bootstrapapi.BootstrapTokenSecretPrefix + string(tokenID)
tokenUsageKey := bootstrapapi.BootstrapTokenUsagePrefix + "test"
secret := v1.Secret{}
secret.Type = bootstrapapi.SecretTypeBootstrapToken
secret.TypeMeta = metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"}
secret.Data = map[string][]byte{}
switch atomic.LoadUint32(&httpTestItr) {
case 0:
secret.Data[bootstrapapi.BootstrapTokenIDKey] = []byte("")
case 1:
secret.Data[bootstrapapi.BootstrapTokenIDKey] = tokenID
secret.ObjectMeta = metav1.ObjectMeta{Name: badValue}
case 2:
secret.Data[bootstrapapi.BootstrapTokenIDKey] = tokenID
secret.Data[bootstrapapi.BootstrapTokenSecretKey] = []byte("")
secret.ObjectMeta = metav1.ObjectMeta{Name: name}
case 3:
secret.Data[bootstrapapi.BootstrapTokenIDKey] = tokenID
secret.Data[bootstrapapi.BootstrapTokenSecretKey] = tokenSecret
secret.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(badValue)
secret.ObjectMeta = metav1.ObjectMeta{Name: name}
case 4:
secret.Data[bootstrapapi.BootstrapTokenIDKey] = tokenID
secret.Data[bootstrapapi.BootstrapTokenSecretKey] = tokenSecret
secret.Data[bootstrapapi.BootstrapTokenExpirationKey] = tokenExpire
secret.ObjectMeta = metav1.ObjectMeta{Name: name}
case 5:
secret.Data[bootstrapapi.BootstrapTokenIDKey] = tokenID
secret.Data[bootstrapapi.BootstrapTokenSecretKey] = tokenSecret
secret.Data[bootstrapapi.BootstrapTokenExpirationKey] = tokenExpire
secret.Data[tokenUsageKey] = []byte("false")
secret.ObjectMeta = metav1.ObjectMeta{Name: name}
case 6:
secret.Data[bootstrapapi.BootstrapTokenIDKey] = tokenID
secret.Data[bootstrapapi.BootstrapTokenSecretKey] = tokenSecret
secret.Data[bootstrapapi.BootstrapTokenExpirationKey] = tokenExpire
secret.Data[tokenUsageKey] = []byte("true")
secret.ObjectMeta = metav1.ObjectMeta{Name: name}
}
secretList := v1.SecretList{}
secretList.Items = []v1.Secret{secret}
secretList.TypeMeta = metav1.TypeMeta{APIVersion: "v1", Kind: "SecretList"}
output, err := json.Marshal(secretList)
if err == nil {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
w.Write([]byte(output))
}
atomic.AddUint32(&httpTestItr, 1)
atomic.StoreUint32(&httpSentResponse, 1)
}
| TestRunDeleteToken |
main.rs | use std::fs::File;
use std::io::{Read, BufWriter};
use std::error::Error;
use std::path::PathBuf;
use pathfinder_svg::BuiltSVG;
use pathfinder_export::{Export, FileFormat};
use usvg::{Tree, Options};
fn | () -> Result<(), Box<dyn Error>> {
let mut args = std::env::args_os().skip(1);
let input = PathBuf::from(args.next().expect("no input given"));
let output = PathBuf::from(args.next().expect("no output given"));
let mut data = Vec::new();
File::open(input)?.read_to_end(&mut data)?;
let svg = BuiltSVG::from_tree(&Tree::from_data(&data, &Options::default()).unwrap());
let scene = &svg.scene;
let mut writer = BufWriter::new(File::create(&output)?);
let format = match output.extension().and_then(|s| s.to_str()) {
Some("pdf") => FileFormat::PDF,
Some("ps") => FileFormat::PS,
_ => return Err("output filename must have .ps or .pdf extension".into())
};
scene.export(&mut writer, format).unwrap();
Ok(())
}
| main |
v1_event_list.py | """
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argocd_python_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argocd_python_client.exceptions import ApiAttributeError
def lazy_import():
from argocd_python_client.model.v1_event import V1Event
from argocd_python_client.model.v1_list_meta import V1ListMeta
globals()['V1Event'] = V1Event
globals()['V1ListMeta'] = V1ListMeta
class V1EventList(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'items': ([V1Event],), # noqa: E501
'metadata': (V1ListMeta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'items': 'items', # noqa: E501
'metadata': 'metadata', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""V1EventList - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
items ([V1Event]): [optional] # noqa: E501
metadata (V1ListMeta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
|
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""V1EventList - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
items ([V1Event]): [optional] # noqa: E501
metadata (V1ListMeta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| continue |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![warn(missing_docs)]
//! `timekeeper` is responsible for external time synchronization in Fuchsia.
use {
anyhow::{Context as _, Error},
chrono::prelude::*,
fidl_fuchsia_deprecatedtimezone as ftz, fidl_fuchsia_net as fnet, fidl_fuchsia_time as ftime,
fuchsia_async::{self as fasync, DurationExt},
fuchsia_component::server::ServiceFs,
fuchsia_zircon as zx,
futures::{StreamExt, TryStreamExt},
log::{debug, error, info, warn},
parking_lot::Mutex,
std::{path::Path, sync::Arc},
};
mod diagnostics;
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
diagnostics::init();
let mut fs = ServiceFs::new();
info!("diagnostics initialized, connecting notifier to servicefs.");
diagnostics::INSPECTOR.serve_tree(&mut fs)?;
let source = initial_utc_source("/config/build-info/minimum-utc-stamp".as_ref())?;
let notifier = Notifier::new(source);
info!("connecting to external update service");
let time_service =
fuchsia_component::client::connect_to_service::<ftz::TimeServiceMarker>().unwrap();
let connectivity_service =
fuchsia_component::client::connect_to_service::<fnet::ConnectivityMarker>().unwrap();
fasync::spawn(maintain_utc(notifier.clone(), time_service, connectivity_service));
fs.dir("svc").add_fidl_service(move |requests: ftime::UtcRequestStream| {
notifier.handle_request_stream(requests);
});
info!("added notifier, serving servicefs");
fs.take_and_serve_directory_handle()?;
let () = fs.collect().await;
Ok(())
}
fn backstop_time(path: &Path) -> Result<DateTime<Utc>, Error> {
let file_contents = std::fs::read_to_string(path).context("reading backstop time from disk")?;
let parsed_offset = NaiveDateTime::parse_from_str(file_contents.trim(), "%s")?;
let utc = DateTime::from_utc(parsed_offset, Utc);
Ok(utc)
}
fn initial_utc_source(backstop_path: &Path) -> Result<Option<ftime::UtcSource>, Error> {
let expected_minimum = backstop_time(backstop_path)?;
let current_utc = Utc::now();
Ok(if current_utc > expected_minimum {
Some(ftime::UtcSource::Backstop)
} else {
warn!(
"latest known-past UTC time ({}) should be earlier than current system time ({})",
expected_minimum, current_utc,
);
None
})
}
/// The top-level control loop for time synchronization.
///
/// Checks for network connectivity before attempting any time updates.
///
/// Actual updates are performed by calls to `fuchsia.deprecatedtimezone.TimeService` which we
/// plan to deprecate.
async fn maintain_utc(
notifs: Notifier,
time_service: ftz::TimeServiceProxy,
connectivity: fnet::ConnectivityProxy,
) {
// wait for the network to come up before we start checking for time
let mut conn_events = connectivity.take_event_stream();
loop {
if let Ok(Some(fnet::ConnectivityEvent::OnNetworkReachable { reachable: true })) =
conn_events.try_next().await
{
break;
}
}
for i in 0.. {
match time_service.update(1).await {
Ok(true) => {
let monotonic_before = zx::Time::get(zx::ClockId::Monotonic).into_nanos();
let utc_now = Utc::now().timestamp_nanos();
let monotonic_after = zx::Time::get(zx::ClockId::Monotonic).into_nanos();
info!(
"CF-884:monotonic_before={}:utc={}:monotonic_after={}",
monotonic_before, utc_now, monotonic_after,
);
notifs.0.lock().set_source(ftime::UtcSource::External, monotonic_before);
break;
}
Ok(false) => {
debug!("failed to update time, probably a network error. retrying.");
}
Err(why) => {
error!("couldn't make request to update time: {:?}", why);
}
}
let sleep_duration = zx::Duration::from_seconds(2i64.pow(i)); // exponential backoff
fasync::Timer::new(sleep_duration.after_now()).await;
}
}
/// Notifies waiting clients when the clock has been updated, wrapped in a lock to allow
/// sharing between tasks.
#[derive(Clone, Debug)]
struct Notifier(Arc<Mutex<NotifyInner>>);
impl Notifier {
fn new(source: Option<ftime::UtcSource>) -> Self {
Notifier(Arc::new(Mutex::new(NotifyInner { source, clients: Vec::new() })))
}
/// Spawns an async task to handle requests on this channel.
fn handle_request_stream(&self, requests: ftime::UtcRequestStream) {
let notifier = self.clone();
fasync::spawn(async move {
info!("listening for UTC requests");
let mut counted_requests = requests.enumerate();
let mut last_seen_state = notifier.0.lock().source;
while let Some((request_count, Ok(ftime::UtcRequest::WatchState { responder }))) =
counted_requests.next().await
{
let mut n = notifier.0.lock();
// we return immediately if this is the first request on this channel, but if
// the backstop time hasn't been set yet then we can't say anything
if n.source.is_some() && (request_count == 0 || last_seen_state != n.source) {
n.reply(responder, zx::Time::get(zx::ClockId::Monotonic).into_nanos());
} else {
n.register(responder);
}
last_seen_state = n.source;
}
});
}
} |
/// Notifies waiting clients when the clock has been updated.
#[derive(Debug)]
struct NotifyInner {
/// The current source for our UTC approximation.
source: Option<ftime::UtcSource>,
/// All clients waiting for an update to UTC's time.
clients: Vec<ftime::UtcWatchStateResponder>,
}
impl NotifyInner {
/// Reply to a client with the current UtcState.
fn reply(&self, responder: ftime::UtcWatchStateResponder, update_time: i64) {
if let Err(why) =
responder.send(ftime::UtcState { timestamp: Some(update_time), source: self.source })
{
warn!("failed to notify a client of an update: {:?}", why);
}
}
/// Registers a client to be later notified that a clock update has occurred.
fn register(&mut self, responder: ftime::UtcWatchStateResponder) {
info!("registering a client for notifications");
self.clients.push(responder);
}
/// Increases the revision counter by 1 and notifies any clients waiting on updates from
/// previous revisions.
fn set_source(&mut self, source: ftime::UtcSource, update_time: i64) {
if self.source != Some(source) {
self.source = Some(source);
let clients = std::mem::replace(&mut self.clients, vec![]);
info!("UTC source changed to {:?}, notifying {} clients", source, clients.len());
for responder in clients {
self.reply(responder, update_time);
}
} else {
info!("received UTC source update but the actual source didn't change.");
}
}
}
#[cfg(test)]
mod tests {
#[allow(unused)]
use {
super::*,
chrono::{offset::TimeZone, NaiveDate},
fuchsia_inspect::{assert_inspect_tree, testing::AnyProperty},
fuchsia_zircon as zx,
std::{
future::Future,
pin::Pin,
task::{Context, Poll, Waker},
},
};
#[test]
fn fixed_backstop_check() {
let y2k_backstop = "/pkg/data/y2k";
let test_backstop = backstop_time(y2k_backstop.as_ref()).unwrap();
let test_source = initial_utc_source(y2k_backstop.as_ref()).unwrap();
let before_test_backstop =
Utc.from_utc_datetime(&NaiveDate::from_ymd(1999, 1, 1).and_hms(0, 0, 0));
let after_test_backstop =
Utc.from_utc_datetime(&NaiveDate::from_ymd(2001, 1, 1).and_hms(0, 0, 0));
assert!(test_backstop > before_test_backstop);
assert!(test_backstop < after_test_backstop);
assert_eq!(test_source, Some(ftime::UtcSource::Backstop));
}
#[test]
fn fallible_backstop_check() {
assert_eq!(initial_utc_source("/pkg/data/end-of-unix-time".as_ref()).unwrap(), None);
}
#[fasync::run_singlethreaded(test)]
async fn single_client() {
diagnostics::init();
info!("starting single notification test");
let (utc, utc_requests) =
fidl::endpoints::create_proxy_and_stream::<ftime::UtcMarker>().unwrap();
let (time_service, mut time_requests) =
fidl::endpoints::create_proxy_and_stream::<ftz::TimeServiceMarker>().unwrap();
let (reachability, reachability_server) =
fidl::endpoints::create_proxy::<fnet::ConnectivityMarker>().unwrap();
// the "network" the time sync server uses is de facto reachable here
let (_, reachability_control) =
reachability_server.into_stream_and_control_handle().unwrap();
reachability_control.send_on_network_reachable(true).unwrap();
let notifier = Notifier::new(Some(ftime::UtcSource::Backstop));
let (mut allow_update, mut wait_for_update) = futures::channel::mpsc::channel(1);
info!("spawning test notifier");
notifier.handle_request_stream(utc_requests);
fasync::spawn(maintain_utc(notifier.clone(), time_service, reachability));
fasync::spawn(async move {
while let Some(Ok(ftz::TimeServiceRequest::Update { responder, .. })) =
time_requests.next().await
{
let () = wait_for_update.next().await.unwrap();
responder.send(true).unwrap();
}
});
info!("checking that the time source has not been externally initialized yet");
assert_eq!(utc.watch_state().await.unwrap().source.unwrap(), ftime::UtcSource::Backstop);
let task_waker = futures::future::poll_fn(|cx| Poll::Ready(cx.waker().clone())).await;
let mut cx = Context::from_waker(&task_waker);
let mut hanging = Box::pin(utc.watch_state());
assert!(
hanging.as_mut().poll(&mut cx).is_pending(),
"hanging get should not return before time updated event has been emitted"
);
info!("sending network update event");
allow_update.try_send(()).unwrap();
info!("waiting for time source update");
assert_eq!(hanging.await.unwrap().source.unwrap(), ftime::UtcSource::External);
}
#[fasync::run_singlethreaded(test)]
async fn inspect_values_are_present() -> Result<(), Error> {
diagnostics::init();
assert_inspect_tree!(diagnostics::INSPECTOR,
root: contains {
start_time_monotonic_nanos: AnyProperty,
current: contains {
system_uptime_monotonic_nanos: AnyProperty,
}
});
Ok(())
}
} | |
setup.py | import ast
import os
import sys
from setuptools import (
find_packages,
setup,
)
def get_version():
path = os.path.join(os.path.dirname(__file__), 'threema', 'gateway', '__init__.py')
with open(path) as file:
for line in file:
if line.startswith('__version__'):
_, value = line.split('=', maxsplit=1)
return ast.literal_eval(value.strip())
else:
raise Exception('Version not found in {}'.format(path))
def read(file):
|
# Allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Import long description
long_description = '\n\n'.join((read('README.rst'), read('CHANGELOG.rst')))
# Check python version
py_version = sys.version_info[:3]
if py_version < (3, 6, 1):
raise Exception("threema.gateway requires Python >= 3.6.1")
# Test requirements
# Note: These are just tools that aren't required, so a version range
# is not necessary here.
tests_require = [
'pytest>=3.1.3,<4',
'pytest-asyncio>=0.6.0,<0.10',
'pytest-cov>=2.5.1,<3',
'flake8==3.7.9',
'isort==4.3.21',
'collective.checkdocs>=0.2',
'Pygments>=2.2.0', # required by checkdocs
'mypy==0.800',
]
setup(
name='threema.gateway',
version=get_version(),
packages=find_packages(),
namespace_packages=['threema'],
install_requires=[
'memoization==0.2.3', # we're using private APIs
'logbook>=1.1.0,<2',
'libnacl>=1.5.2,<2',
'click>=6.7,<7', # doesn't seem to follow semantic versioning
'aiohttp>=3.7.3,<4',
'wrapt>=1.10.10,<2',
],
tests_require=tests_require,
extras_require={
':python_version<="3.4"': [
'asyncio==3.4.3',
'pytest-asyncio==0.5.0'
],
':python_version<="3.5"': [
'typing>=3.6.1,<3.7',
],
'dev': tests_require,
'uvloop': ['uvloop>=0.8.0,<2'],
},
include_package_data=True,
entry_points={
'console_scripts': [
'threema-gateway = threema.gateway.bin.gateway_client:main',
],
},
# PyPI metadata
author='Lennart Grahl',
author_email='[email protected]',
description=('An API for the Threema gateway service to send and receive '
'messages including text, images, files and delivery reports.'),
long_description=long_description,
license='MIT License',
keywords='threema gateway service sdk api',
url='https://gateway.threema.ch/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Communications :: Chat',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Logging',
],
)
| return open(os.path.join(os.path.dirname(__file__), file)).read().strip() |
contract.rs | #[cfg(not(feature = "library"))]
use cosmwasm_std::entry_point;
use cosmwasm_std::{
Addr, DepsMut, Env, MessageInfo, Response, QuerierWrapper, Uint128, Storage, StdResult
};
use cw2::set_contract_version;
use crate::error::ContractError;
use crate::msg::{ExecuteMsg, InstantiateMsg};
use crate::state::{TOKEN0, TOKEN1, PAIR, PRICE0, PRICE1, START_TIME, EPOCH, PERIOD,
LAST_EPOCH_TIME, OPERATOR};
use terraswap::asset::{AssetInfo, Asset};
use terraswap::pair::{QueryMsg as PairQueryMsg, SimulationResponse, PoolResponse};
use terraswap::querier::{simulate, query_pair_info};
// version info for migration info
const CONTRACT_NAME: &str = "Oracle";
const CONTRACT_VERSION: &str = env!("CARGO_PKG_VERSION");
pub fn get_price(querier: &QuerierWrapper, _pair: Addr, asset_info: &AssetInfo) -> Uint128 {
let offer_asset = Asset{
info: asset_info.clone(),
amount: Uint128::from(1u128)
};
let sim_res: SimulationResponse = simulate( querier, _pair, &offer_asset ).unwrap();
sim_res.return_amount
}
pub fn check_onlyoperator(storage: &dyn Storage, sender: Addr) -> Result<Response, ContractError> {
let operator = OPERATOR.load(storage)?;
if operator != sender {
return Err(ContractError::Unauthorized{});
}
Ok(Response::new())
}
pub fn check_starttime(storage: &dyn Storage, env: Env)
->Result<Response, ContractError>
{
if Uint128::from(env.block.time.seconds() as u128) < START_TIME.load(storage)? {
return Err(ContractError::NotStartedYet{ });
}
Ok(Response::new())
}
pub fn get_next_epoch_point(storage: &dyn Storage)
->StdResult<Uint128>
{
Ok(LAST_EPOCH_TIME.load(storage)? + PERIOD.load(storage)?)
}
pub fn check_epoch(storage: &dyn Storage, env: Env, sender: Addr)
->Result<Response, ContractError>
|
pub fn check_epoch_after(storage:&mut dyn Storage, env: Env)
->Result<Response, ContractError>
{
let mut next_epoch_point = get_next_epoch_point(storage)?;
loop {
LAST_EPOCH_TIME.save(storage, &next_epoch_point)?;
let mut epoch = EPOCH.load(storage)?;
epoch += Uint128::from(1u128);
next_epoch_point = get_next_epoch_point(storage)?;
if Uint128::from(env.block.time.seconds() as u128) < next_epoch_point{
break;
}
}
Ok(Response::new())
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn instantiate(
deps: DepsMut,
env: Env,
info: MessageInfo,
msg: InstantiateMsg,
) -> Result<Response, ContractError> {
set_contract_version(deps.storage, CONTRACT_NAME, CONTRACT_VERSION)?;
//-----------epoch----------------
OPERATOR.save(deps.storage, &info.sender)?;
PERIOD.save(deps.storage, &msg.period)?;
START_TIME.save(deps.storage, &msg.start_time)?;
EPOCH.save(deps.storage, &Uint128::zero())?;
LAST_EPOCH_TIME.save(deps.storage, &(msg.start_time - msg.period))?;
//----------------------------------
let pair = msg.pair;
PAIR.save(deps.storage, &pair)?;
let pair_info: PoolResponse = deps.querier.query_wasm_smart(
pair.clone(),
&PairQueryMsg::Pool{}
).unwrap();
let token0 = pair_info.assets[0].info.clone();
let token1 = pair_info.assets[0].info.clone();
TOKEN0.save( deps.storage, &token0)?;
TOKEN1.save( deps.storage, &token1)?;
PRICE0.save(deps.storage, &get_price(&deps.querier, pair.clone(), &token0))?;
PRICE1.save(deps.storage, &get_price(&deps.querier, pair, &token1))?;
Ok(Response::new()
.add_attribute("method", "instantiate"))
}
#[entry_point]
pub fn execute(
deps: DepsMut,
env: Env,
info: MessageInfo,
msg: ExecuteMsg,
) -> Result<Response, ContractError> {
match msg {
ExecuteMsg::Update { } => try_update(deps, env, info),
ExecuteMsg::SetPeriod{ period } => try_setperiod(deps, info, period),
ExecuteMsg::SetEpoch{ epoch } => try_setepoch(deps, info, epoch),
}
}
pub fn try_update(deps:DepsMut, env:Env, info:MessageInfo)
-> Result<Response, ContractError>
{
check_epoch(deps.storage, env.clone(), info.sender.clone())?;
let token0 = TOKEN0.load(deps.storage)?;
let token1 = TOKEN1.load(deps.storage)?;
let pair = PAIR.load(deps.storage)?;
TOKEN0.save( deps.storage, &token0)?;
TOKEN1.save( deps.storage, &token1)?;
PRICE0.save(deps.storage, &get_price(&deps.querier, pair.clone(), &token0))?;
PRICE1.save(deps.storage, &get_price(&deps.querier, pair, &token1))?;
check_epoch_after(deps.storage, env.clone())?;
Ok(Response::new())
}
pub fn try_setperiod(deps: DepsMut, info: MessageInfo, period: Uint128)
-> Result<Response, ContractError>
{
check_onlyoperator(deps.storage, info.sender)?;
let hour = Uint128::from(3600u128);
if period < Uint128::from(1u128) * hour || period > Uint128::from(48u128) * hour {
return Err(ContractError::OutOfRange{ });
}
PERIOD.save(deps.storage, &period)?;
Ok(Response::new())
}
pub fn try_setepoch(deps: DepsMut, info: MessageInfo, epoch: Uint128)
-> Result<Response, ContractError>
{
check_onlyoperator(deps.storage, info.sender)?;
EPOCH.save(deps.storage, &epoch)?;
Ok(Response::new())
}
| {
let next_epoch_point = get_next_epoch_point(storage)?;
if Uint128::from(env.block.time.seconds() as u128) < next_epoch_point {
if sender != OPERATOR.load(storage)? {
return Err(ContractError::Unauthorized{ });
}
}
Ok(Response::new())
} |
external.rs | use crate::blobs::{Blob, HasBlob};
use crate::{Bytes, LexState, Loc};
/// Byte sequence based on external implementation
#[repr(C)]
pub struct Token {
pub(crate) blob: Blob<Self>,
}
impl Clone for Token {
fn clone(&self) -> Self {
Self::new(
self.token_type(),
self.token_value().clone(),
self.loc().clone(),
self.lex_state_before(),
self.lex_state_after(),
)
}
}
impl PartialEq for Token {
fn eq(&self, other: &Self) -> bool {
(self.token_type() == other.token_type())
&& (self.token_value() == other.token_value())
&& (self.loc() == other.loc())
&& (self.lex_state_before() == other.lex_state_before())
&& (self.lex_state_after() == other.lex_state_after())
}
}
impl Eq for Token {}
impl Drop for Token {
fn drop(&mut self) {
unsafe { lib_ruby_parser__external__token__drop(&mut self.blob) }
}
}
extern "C" {
fn lib_ruby_parser__external__token__new(
token_type: i32,
token_value: Blob<Bytes>,
loc: Blob<Loc>,
lex_state_before: i32,
lex_state_after: i32,
) -> Blob<Token>; | fn lib_ruby_parser__external__token__get_token_value(
blob: *const Blob<Token>,
) -> *const Blob<Bytes>;
fn lib_ruby_parser__external__token__set_token_value(
blob: *mut Blob<Token>,
bytes_blob: Blob<Bytes>,
);
fn lib_ruby_parser__external__token__into_token_value(blob: Blob<Token>) -> Blob<Bytes>;
fn lib_ruby_parser__external__token__get_loc(blob: *const Blob<Token>) -> *const Blob<Loc>;
fn lib_ruby_parser__external__token__get_lex_state_before(blob: *const Blob<Token>) -> i32;
fn lib_ruby_parser__external__token__get_lex_state_after(blob: *const Blob<Token>) -> i32;
}
impl Token {
/// Constructor
pub fn new(
token_type: i32,
token_value: Bytes,
loc: Loc,
lex_state_before: LexState,
lex_state_after: LexState,
) -> Self {
let blob = unsafe {
lib_ruby_parser__external__token__new(
token_type,
token_value.into_blob(),
loc.into_blob(),
lex_state_before.get(),
lex_state_after.get(),
)
};
Self { blob }
}
/// Returns type of the token
pub fn token_type(&self) -> i32 {
unsafe { lib_ruby_parser__external__token__get_token_type(&self.blob) }
}
/// Returns type of the token
pub fn token_value(&self) -> &Bytes {
unsafe {
(lib_ruby_parser__external__token__get_token_value(&self.blob) as *const Bytes)
.as_ref()
.unwrap()
}
}
/// Sets token value
pub fn set_token_value(&mut self, token_value: Bytes) {
unsafe {
lib_ruby_parser__external__token__set_token_value(
&mut self.blob,
token_value.into_blob(),
)
}
}
/// Consumes self, returns owned values of the token
pub fn into_token_value(self) -> Bytes {
let bytes_blob =
unsafe { lib_ruby_parser__external__token__into_token_value(self.into_blob()) };
Bytes { blob: bytes_blob }
}
/// Returns location of the token
pub fn loc(&self) -> &Loc {
unsafe {
(lib_ruby_parser__external__token__get_loc(&self.blob) as *const Loc)
.as_ref()
.unwrap()
}
}
/// Returns lex state **before** reading the token
pub fn lex_state_before(&self) -> LexState {
let value = unsafe { lib_ruby_parser__external__token__get_lex_state_before(&self.blob) };
LexState { value }
}
/// Returns lex state **after** reading the token
pub fn lex_state_after(&self) -> LexState {
let value = unsafe { lib_ruby_parser__external__token__get_lex_state_after(&self.blob) };
LexState { value }
}
} | fn lib_ruby_parser__external__token__drop(blob: *mut Blob<Token>);
fn lib_ruby_parser__external__token__get_token_type(blob: *const Blob<Token>) -> i32; |
mysql_flavor_mysql56_test.go | // Copyright 2015, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mysqlctl
import (
"reflect"
"testing"
"github.com/gitql/vitess/go/mysqlconn/replication"
"github.com/gitql/vitess/go/sqldb"
)
func TestMysql56VersionMatch(t *testing.T) {
table := map[string]bool{
"10.0.13-MariaDB-1~precise-log": false,
"5.1.63-google-log": false,
"5.6.24-log": true,
}
for input, want := range table {
if got := (&mysql56{}).VersionMatch(input); got != want {
t.Errorf("(&mysql56{}).VersionMatch(%#v) = %v, want %v", input, got, want)
}
}
}
func TestMysql56ResetReplicationCommands(t *testing.T) {
want := []string{
"STOP SLAVE",
"RESET SLAVE ALL",
"RESET MASTER",
}
if got := (&mysql56{}).ResetReplicationCommands(); !reflect.DeepEqual(got, want) {
t.Errorf("(&mysql56{}).ResetReplicationCommands() = %#v, want %#v", got, want)
}
}
func TestMysql56PromoteSlaveCommands(t *testing.T) {
want := []string{"RESET SLAVE ALL"}
if got := (&mysql56{}).PromoteSlaveCommands(); !reflect.DeepEqual(got, want) {
t.Errorf("(&mysql56{}).PromoteSlaveCommands() = %#v, want %#v", got, want)
}
}
func | (t *testing.T) {
pos, _ := (&mysql56{}).ParseReplicationPosition("00010203-0405-0607-0809-0a0b0c0d0e0f:1-2")
want := []string{
"RESET MASTER",
"SET GLOBAL gtid_purged = '00010203-0405-0607-0809-0a0b0c0d0e0f:1-2'",
}
got, err := (&mysql56{}).SetSlavePositionCommands(pos)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
if !reflect.DeepEqual(got, want) {
t.Errorf("(&mysql56{}).SetSlavePositionCommands(%#v) = %#v, want %#v", pos, got, want)
}
}
func TestMysql56SetMasterCommands(t *testing.T) {
params := &sqldb.ConnParams{
Uname: "username",
Pass: "password",
}
masterHost := "localhost"
masterPort := 123
masterConnectRetry := 1234
want := []string{
`CHANGE MASTER TO
MASTER_HOST = 'localhost',
MASTER_PORT = 123,
MASTER_USER = 'username',
MASTER_PASSWORD = 'password',
MASTER_CONNECT_RETRY = 1234,
MASTER_AUTO_POSITION = 1`,
}
got, err := (&mysql56{}).SetMasterCommands(params, masterHost, masterPort, masterConnectRetry)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
if !reflect.DeepEqual(got, want) {
t.Errorf("(&mysql56{}).SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want)
}
}
func TestMysql56SetMasterCommandsSSL(t *testing.T) {
params := &sqldb.ConnParams{
Uname: "username",
Pass: "password",
SslCa: "ssl-ca",
SslCaPath: "ssl-ca-path",
SslCert: "ssl-cert",
SslKey: "ssl-key",
}
params.EnableSSL()
masterHost := "localhost"
masterPort := 123
masterConnectRetry := 1234
want := []string{
`CHANGE MASTER TO
MASTER_HOST = 'localhost',
MASTER_PORT = 123,
MASTER_USER = 'username',
MASTER_PASSWORD = 'password',
MASTER_CONNECT_RETRY = 1234,
MASTER_SSL = 1,
MASTER_SSL_CA = 'ssl-ca',
MASTER_SSL_CAPATH = 'ssl-ca-path',
MASTER_SSL_CERT = 'ssl-cert',
MASTER_SSL_KEY = 'ssl-key',
MASTER_AUTO_POSITION = 1`,
}
got, err := (&mysql56{}).SetMasterCommands(params, masterHost, masterPort, masterConnectRetry)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
if !reflect.DeepEqual(got, want) {
t.Errorf("(&mysql56{}).SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want)
}
}
func TestMysql56MakeBinlogEvent(t *testing.T) {
input := []byte{1, 2, 3}
want := replication.NewMysql56BinlogEvent([]byte{1, 2, 3})
if got := (&mysql56{}).MakeBinlogEvent(input); !reflect.DeepEqual(got, want) {
t.Errorf("(&mysql56{}).MakeBinlogEvent(%#v) = %#v, want %#v", input, got, want)
}
}
| TestMysql56SetSlavePositionCommands |
reader.go | /*
* Copyright 2012-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package conf
import (
"bytes"
"io/ioutil"
"github.com/spf13/viper"
)
func init() {
RegisterReader(Viper("prop"), ".properties")
RegisterReader(Viper("yaml"), ".yaml", ".yml")
RegisterReader(Viper("toml"), ".toml")
}
// Reader 属性读取器接口
type Reader interface {
FileExt() []string // 属性读取器支持的文件扩展名的列表
ReadFile(filename string, out map[string]interface{}) error
ReadBuffer(buffer []byte, out map[string]interface{}) error
}
var readers []Reader
// EachReader 遍历属性读取器列表
func EachReader(fn func(r Reader) error) error {
for _, r := range readers {
if err := fn(r); err != nil {
return err
}
}
return nil
}
// RegisterReader 注册属性读取器
| (fn ReaderFunc, fileExt ...string) {
readers = append(readers, &reader{ext: fileExt, fn: fn})
}
type ReaderFunc func(b []byte, out map[string]interface{}) error
// reader 属性读取接口的默认实现。
type reader struct {
ext []string
fn ReaderFunc
}
// FileExt 返回属性读取器对应的文件扩展名的列表
func (r *reader) FileExt() []string { return r.ext }
// ReadBuffer 从内存中读取当前属性读取器支持的格式。
func (r *reader) ReadBuffer(b []byte, out map[string]interface{}) error {
return r.fn(b, out)
}
// ReadBuffer 从文件中读取当前属性读取器支持的格式。
func (r *reader) ReadFile(filename string, out map[string]interface{}) error {
file, err := ioutil.ReadFile(filename)
if err != nil {
return err
}
return r.ReadBuffer(file, out)
}
// Viper 使用 viper 读取 fileType 类型的属性文件。
func Viper(fileType string) ReaderFunc {
return func(b []byte, out map[string]interface{}) error {
v := viper.New()
v.SetConfigType(fileType)
if err := v.ReadConfig(bytes.NewBuffer(b)); err != nil {
return err
}
for _, key := range v.AllKeys() {
val := v.Get(key)
out[key] = val
}
return nil
}
}
| func RegisterReader |
world.ts | import { Region } from './region';
import { Location } from './location';
import { RandomizerSettings } from './randomizerSettings';
import { RegionCollection } from './regionCollection';
import { LocationCollection } from './locationCollection';
import { ItemCollection } from './itemCollection';
import { MersenneTwister } from '../mersenneTwister';
import { SearchResults, VisitedRegionWrapper } from './searchResults';
/**
* Generic representation of a Metroid Prime-like game world.
* In actual use, this class is extended by a game-specific subclass, such as PrimeWorld.
*/
export class | {
protected settings: RandomizerSettings;
protected rng: MersenneTwister;
protected regions: RegionCollection;
protected itemPool: ItemCollection;
protected cachedLocations: LocationCollection;
protected rootRegion: Region;
constructor(settings: RandomizerSettings) {
this.settings = settings;
}
/**
* Returns the collection of regions belonging to this world.
*/
getRegions(): RegionCollection {
return this.regions;
}
/**
* Returns a region by its name.
* @param key The name of the region being queried.
*/
getRegionByKey(key: string): Region {
return this.regions.getRegionByKey(key);
}
/**
* Assigns a region collection to this world.
* @param regions The region collection being added.
*/
setRegions(regions: RegionCollection) {
this.regions = regions;
}
/**
* Returns the configuration settings used to generate this world.
*/
getSettings(): RandomizerSettings {
return this.settings;
}
/**
* Sets the configuration settings used to generate this world.
* @param settings The configuration object being set.
*/
setSettings(settings: RandomizerSettings) {
this.settings = settings;
}
/**
* Returns the pool of items that will be placed in this world.
*/
getItemPool(): ItemCollection {
return this.itemPool;
}
/**
* Assigns the to-be-placed pool of items to this world.
* @param itemPool The item pool being added.
*/
setItemPool(itemPool: ItemCollection) {
this.itemPool = itemPool;
}
/**
* Returns the random number generator used to generate this world.
*/
getRng(): MersenneTwister {
return this.rng;
}
/**
* Assigns a random number generator to this world.
* @param rng The random number generator being added.
*/
setRng(rng: MersenneTwister) {
this.rng = rng;
}
/**
* Returns the starting point/region of this world.
*/
getRootRegion(): Region {
return this.rootRegion;
}
/**
* Assigns the starting region to this world.
* @param rootRegion The starting region to be set.
*/
setRootRegion(rootRegion: Region) {
this.rootRegion = rootRegion;
}
/**
* Returns all of the item locations found in this world.
*/
getLocations(): LocationCollection {
if (!this.cachedLocations) {
let locations: Location[] = [];
for (const region of this.regions.toArray()) {
locations.push(...region.getLocations().toArray());
}
this.cachedLocations = new LocationCollection(locations);
}
return this.cachedLocations;
}
/**
* Returns an item location by its name.
* @param key The name of the location being queried.
*/
getLocationByKey(key: string) {
return this.getLocations().getLocationByKey(key);
}
/**
* Connects the exits/entrances of all of this world's regions.
*/
initializeEntrances(): void {
for (const region of this.getRegions().toArray()) {
for (const exit of region.getExits()) {
exit.connect(this.getRegions().getRegionByKey(exit.getConnectedRegionKey()));
}
}
}
/**
* Traverses the game world via graph search, marking which regions can be visited with the given items,
* and returns the results of the search.
*
* @param items The player's assumed item inventory when running the search.
* @param startingRegion The region to start the search in. Defaults to the root region if not provided.
*/
searchRegions(items: ItemCollection, startingRegion?: Region, destinationRegion?: Region): SearchResults {
// If no starting region is defined, get the first region the Root region is connected to and start there
if (!startingRegion) {
startingRegion = this.rootRegion.getExits()[0].getConnectedRegion();
}
// Visited regions object
const visited: VisitedRegionWrapper[] = [];
// Use an array instance as a queue
const regionQueue: Region[] = [];
// Mark the starting region as visited and enqueue it
visited.push({ region: startingRegion, entryPoint: null });
regionQueue.push(startingRegion);
while (regionQueue.length) {
// Dequeue a region.
const region = regionQueue.shift();
// Get all exits (and their connected regions) for the current region.
// If an adjacent region hasn't been visited, check if it can be visited.
// If it can, mark it visited and enqueue it.
for (const exit of region.getExits()) {
const connectedRegion = exit.getConnectedRegion();
// Check if the adjacent region can be visited
if (exit.accessRule(items, this.settings)
&& !visited.find(visitedItem => visitedItem.region.getName() === connectedRegion.getName())) {
visited.push({ region: connectedRegion, entryPoint: exit });
// If a destination is provided, break the search if we visit the destination
if (destinationRegion && connectedRegion.getName() === destinationRegion.getName()) {
break;
}
regionQueue.push(connectedRegion);
}
// Else, continue BFS
}
}
return new SearchResults({ visitedRegions: visited, items: items });
}
/**
* Stub method meant to be overridden in a sub-class.
* @param collectedItems
*/
collectItems(collectedItems?: ItemCollection): ItemCollection {
return null; //stub
}
}
| World |
pickPackageFromMeta.ts | import PnpmError from '@pnpm/error'
import { VersionSelectors } from '@pnpm/resolver-base'
import semver from 'semver'
import { RegistryPackageSpec } from './parsePref'
import { PackageInRegistry, PackageMeta } from './pickPackage'
export default function (
spec: RegistryPackageSpec,
preferredVersionSelectors: VersionSelectors | undefined,
meta: PackageMeta
): PackageInRegistry {
try {
let version!: string
switch (spec.type) {
case 'version':
version = spec.fetchSpec
break
case 'tag':
version = meta['dist-tags'][spec.fetchSpec]
break
case 'range':
version = pickVersionByVersionRange(meta, spec.fetchSpec, preferredVersionSelectors)
break
}
const manifest = meta.versions[version]
if (manifest && meta['name']) {
// Packages that are published to the GitHub registry are always published with a scope.
// However, the name in the package.json for some reason may omit the scope.
// So the package published to the GitHub registry will be published under @foo/bar
// but the name in package.json will be just bar.
// In order to avoid issues, we consider that the real name of the package is the one with the scope.
manifest.name = meta['name']
}
return manifest
} catch (err: any) { // eslint-disable-line
throw new PnpmError('MALFORMED_METADATA',
`Received malformed metadata for "${spec.name}"`,
{ hint: 'This might mean that the package was unpublished from the registry' }
)
}
}
function | (
meta: PackageMeta,
versionRange: string,
preferredVerSels?: VersionSelectors
) {
let versions: string[] | undefined
const latest = meta['dist-tags'].latest
const preferredVerSelsArr = Object.entries(preferredVerSels ?? {})
if (preferredVerSelsArr.length > 0) {
const preferredVersions: string[] = []
for (const [preferredSelector, preferredSelectorType] of preferredVerSelsArr) {
if (preferredSelector === versionRange) continue
switch (preferredSelectorType) {
case 'tag': {
preferredVersions.push(meta['dist-tags'][preferredSelector])
break
}
case 'range': {
// This might be slow if there are many versions
// and the package is an indirect dependency many times in the project.
// If it will create noticable slowdown, then might be a good idea to add some caching
versions = Object.keys(meta.versions)
for (const version of versions) {
if (semver.satisfies(version, preferredSelector, true)) {
preferredVersions.push(version)
}
}
break
}
case 'version': {
if (meta.versions[preferredSelector]) {
preferredVersions.push(preferredSelector)
}
break
}
}
}
if (preferredVersions.includes(latest) && semver.satisfies(latest, versionRange, true)) {
return latest
}
const preferredVersion = semver.maxSatisfying(preferredVersions, versionRange, true)
if (preferredVersion) {
return preferredVersion
}
}
// Not using semver.satisfies in case of * because it does not select beta versions.
// E.g.: 1.0.0-beta.1. See issue: https://github.com/pnpm/pnpm/issues/865
if (versionRange === '*' || semver.satisfies(latest, versionRange, true)) {
return latest
}
versions = versions ?? Object.keys(meta.versions)
const maxVersion = semver.maxSatisfying(versions, versionRange, true)
// if the selected version is deprecated, try to find a non-deprecated one that satisfies the range
if (maxVersion && meta.versions[maxVersion].deprecated && versions.length > 1) {
const nonDeprecatedVersions = versions.map((version) => meta.versions[version])
.filter((versionMeta) => !versionMeta.deprecated)
.map((versionMeta) => versionMeta.version)
const maxNonDeprecatedVersion = semver.maxSatisfying(nonDeprecatedVersions, versionRange, true)
if (maxNonDeprecatedVersion) return maxNonDeprecatedVersion
}
return maxVersion
}
| pickVersionByVersionRange |
test_mgmt_batch.py | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import io
import logging
import time
import unittest
import requests
import azure.mgmt.batch
from azure.mgmt.batch import models
from azure.common.exceptions import CloudError
from mgmt_batch_preparers import KeyVaultPreparer, SimpleBatchPreparer
from devtools_testutils import (
AzureMgmtTestCase,
ResourceGroupPreparer,
StorageAccountPreparer
)
AZURE_LOCATION = 'westcentralus'
EXISTING_BATCH_ACCOUNT = {'name': 'sdktest2', 'location': 'westcentralus'}
class MgmtBatchTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtBatchTest, self).setUp()
self.mgmt_batch_client = self.create_mgmt_client(
azure.mgmt.batch.BatchManagementClient)
self.mgmt_keyvault_client = self.create_mgmt_client(
azure.mgmt.keyvault.KeyVaultManagementClient)
def _get_account_name(self):
return self.get_resource_name('batch')[-24:]
def test_mgmt_batch_list_operations(self):
operations = self.mgmt_batch_client.operations.list()
all_ops = list(operations)
self.assertEqual(len(all_ops), 35)
self.assertEqual(all_ops[0].name, 'Microsoft.Batch/batchAccounts/providers/Microsoft.Insights/diagnosticSettings/read')
self.assertEqual(all_ops[0].origin, 'system')
self.assertEqual(all_ops[0].display.provider, 'Microsoft Batch')
self.assertEqual(all_ops[0].display.operation, 'Read diagnostic setting')
def test_mgmt_batch_subscription_quota(self):
quotas = self.mgmt_batch_client.location.get_quotas(AZURE_LOCATION)
self.assertIsInstance(quotas, models.BatchLocationQuota)
self.assertEqual(quotas.account_quota, 3)
def test_mgmt_batch_account_name(self):
# Test Invalid Account Name
availability = self.mgmt_batch_client.location.check_name_availability(
AZURE_LOCATION, "randombatchaccount@5^$g9873495873")
self.assertIsInstance(availability, models.CheckNameAvailabilityResult)
self.assertFalse(availability.name_available)
self.assertEqual(availability.reason, models.NameAvailabilityReason.invalid)
# Test Unvailable Account Name
availability = self.mgmt_batch_client.location.check_name_availability(
EXISTING_BATCH_ACCOUNT['location'], EXISTING_BATCH_ACCOUNT['name'])
self.assertIsInstance(availability, models.CheckNameAvailabilityResult)
self.assertFalse(availability.name_available)
self.assertEqual(availability.reason, models.NameAvailabilityReason.already_exists)
# Test Available Account Name
availability = self.mgmt_batch_client.location.check_name_availability(
AZURE_LOCATION, self._get_account_name())
self.assertIsInstance(availability, models.CheckNameAvailabilityResult)
self.assertTrue(availability.name_available)
@ResourceGroupPreparer(location=AZURE_LOCATION)
@KeyVaultPreparer(location=AZURE_LOCATION)
def test_mgmt_batch_byos_account(self, resource_group, location, keyvault):
if self.is_live:
keyvault = keyvault.result()
batch_account = models.BatchAccountCreateParameters(
location=location,
pool_allocation_mode=models.PoolAllocationMode.user_subscription)
with self.assertRaises(Exception): # TODO: What exception
creating = self.mgmt_batch_client.batch_account.create(
resource_group.name,
self._get_account_name(),
batch_account)
creating.result()
keyvault_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.KeyVault/vaults/{}".format(
self.settings.SUBSCRIPTION_ID, resource_group.name, keyvault.name)
keyvault_url = "https://{}.vault.azure.net/".format(keyvault.name)
batch_account = models.BatchAccountCreateParameters(
location=location,
pool_allocation_mode=models.PoolAllocationMode.user_subscription,
key_vault_reference={'id': keyvault_id, 'url': keyvault_url})
creating = self.mgmt_batch_client.batch_account.create(
resource_group.name,
self._get_account_name(),
batch_account)
creating.result()
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_mgmt_batch_account(self, resource_group, location):
batch_account = models.BatchAccountCreateParameters(
location=location,
)
account_name = self._get_account_name()
account_setup = self.mgmt_batch_client.batch_account.create(
resource_group.name,
account_name,
batch_account)
account_setup.result()
# Test Get Account
account = self.mgmt_batch_client.batch_account.get(resource_group.name, account_name)
self.assertEqual(account.dedicated_core_quota, 20)
self.assertEqual(account.low_priority_core_quota, 100)
self.assertEqual(account.pool_quota, 100)
self.assertEqual(account.pool_allocation_mode.value, 'BatchService')
# Test List Accounts by Resource Group
accounts = self.mgmt_batch_client.batch_account.list_by_resource_group(resource_group.name)
self.assertEqual(len(list(accounts)), 1)
# Test List Account Keys
keys = self.mgmt_batch_client.batch_account.get_keys(resource_group.name, account_name)
self.assertIsInstance(keys, models.BatchAccountKeys)
self.assertEqual(keys.account_name, account_name)
secondary = keys.secondary
# Test Regenerate Account Key
keys = self.mgmt_batch_client.batch_account.regenerate_key(
resource_group.name, account_name, 'Secondary')
self.assertIsInstance(keys, models.BatchAccountKeys)
self.assertFalse(keys.secondary == secondary)
# Test Update Account
update_tags = {'Name': 'tagName', 'Value': 'tagValue'}
updated = self.mgmt_batch_client.batch_account.update(resource_group.name, account_name, update_tags)
self.assertIsInstance(updated, models.BatchAccount)
self.assertEqual(updated.tags['Name'], 'tagName')
self.assertEqual(updated.tags['Value'], 'tagValue')
# Test Delete Account
response = self.mgmt_batch_client.batch_account.delete(resource_group.name, account_name)
self.assertIsNone(response.result())
@ResourceGroupPreparer(location=AZURE_LOCATION)
@StorageAccountPreparer(name_prefix='batch', location=AZURE_LOCATION)
def test_mgmt_batch_applications(self, resource_group, location, storage_account, storage_account_key):
# Test Create Account with Auto-Storage
storage_resource = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}'.format(
self.settings.SUBSCRIPTION_ID,
resource_group.name,
storage_account.name
)
batch_account = models.BatchAccountCreateParameters(
location=location,
auto_storage=models.AutoStorageBaseProperties(storage_resource)
) | resource_group.name,
account_name,
batch_account)
account_setup.result()
# Test Sync AutoStorage Keys
response = self.mgmt_batch_client.batch_account.synchronize_auto_storage_keys(
resource_group.name, account_name)
self.assertIsNone(response)
# Test Add Application
application_id = 'my_application_id'
application_name = 'my_application_name'
application_ver = 'v1.0'
application_properties = models.Application(display_name=application_name, allow_updates=True)
application = self.mgmt_batch_client.application.create(
resource_group.name, account_name, application_id, parameters=application_properties)
self.assertIsInstance(application, models.Application)
self.assertTrue(application_id in application.id)
self.assertTrue(application_name in application.display_name)
self.assertTrue(application.allow_updates)
# Test Mgmt Get Application
application = self.mgmt_batch_client.application.get(resource_group.name, account_name, application_id)
self.assertIsInstance(application, models.Application)
self.assertTrue(application_id in application.id)
self.assertTrue(application_name in application.display_name)
self.assertTrue(application.allow_updates)
# Test Mgmt List Applications
applications = self.mgmt_batch_client.application.list(resource_group.name, account_name)
self.assertTrue(len(list(applications)) > 0)
# Test Add Application Package
package_ref = self.mgmt_batch_client.application_package.create(
resource_group.name, account_name, application_id, application_ver)
self.assertIsInstance(package_ref, models.ApplicationPackage)
with io.BytesIO(b'Hello World') as f:
headers = {'x-ms-blob-type': 'BlockBlob'}
upload = requests.put(package_ref.storage_url, headers=headers, data=f.read())
if not upload:
raise ValueError('Upload failed: {!r}'.format(upload))
# Test Activate Application Package
response = self.mgmt_batch_client.application_package.activate(
resource_group.name, account_name, application_id, application_ver, 'zip')
self.assertTrue(response.state == models.PackageState.active)
# Test Update Application
params = models.Application(
allow_updates=False,
display_name='my_updated_name',
default_version=application_ver
)
response = self.mgmt_batch_client.application.update(
resource_group.name, account_name, application_id, params)
self.assertTrue(application_ver in response.default_version)
self.assertTrue('my_updated_name' in response.display_name)
self.assertFalse(response.allow_updates)
# Test Get Application Package
package_ref = self.mgmt_batch_client.application_package.get(
resource_group.name, account_name, application_id, application_ver)
self.assertIsInstance(package_ref, models.ApplicationPackage)
self.assertTrue(application_id in package_ref.id)
self.assertEqual(package_ref.format, 'zip')
self.assertEqual(package_ref.state, models.PackageState.active)
# Test Delete Application Package
response = self.mgmt_batch_client.application_package.delete(
resource_group.name, account_name, application_id, application_ver)
self.assertIsNone(response)
# Test Delete Application
response = self.mgmt_batch_client.application.delete(
resource_group.name, account_name, application_id)
self.assertIsNone(response)
# Test Delete Account
response = self.mgmt_batch_client.batch_account.delete(resource_group.name, account_name)
self.assertIsNone(response.result())
@ResourceGroupPreparer(location=AZURE_LOCATION)
@SimpleBatchPreparer(location=AZURE_LOCATION)
def test_mgmt_batch_certificates(self, resource_group, location, batch_account):
# Test Add Certificate
parameters = models.CertificateCreateOrUpdateParameters(
thumbprint='cff2ab63c8c955aaf71989efa641b906558d9fb7',
thumbprint_algorithm='sha1',
data='MIIGMQIBAzCCBe0GCSqGSIb3DQEHAaCCBd4EggXaMIIF1jCCA8AGCSqGSIb3DQEHAaCCA7EEggOtMIIDqTCCA6UGCyqGSIb3DQEMCgECoIICtjCCArIwHAYKKoZIhvcNAQwBAzAOBAhyd3xCtln3iQICB9AEggKQhe5P10V9iV1BsDlwWT561Yu2hVq3JT8ae/ebx1ZR/gMApVereDKkS9Zg4vFyssusHebbK5pDpU8vfAqle0TM4m7wGsRj453ZorSPUfMpHvQnAOn+2pEpWdMThU7xvZ6DVpwhDOQk9166z+KnKdHGuJKh4haMT7Rw/6xZ1rsBt2423cwTrQVMQyACrEkianpuujubKltN99qRoFAxhQcnYE2KlYKw7lRcExq6mDSYAyk5xJZ1ZFdLj6MAryZroQit/0g5eyhoNEKwWbi8px5j71pRTf7yjN+deMGQKwbGl+3OgaL1UZ5fCjypbVL60kpIBxLZwIJ7p3jJ+q9pbq9zSdzshPYor5lxyUfXqaso/0/91ayNoBzg4hQGh618PhFI6RMGjwkzhB9xk74iweJ9HQyIHf8yx2RCSI22JuCMitPMWSGvOszhbNx3AEDLuiiAOHg391mprEtKZguOIr9LrJwem/YmcHbwyz5YAbZmiseKPkllfC7dafFfCFEkj6R2oegIsZo0pEKYisAXBqT0g+6/jGwuhlZcBo0f7UIZm88iA3MrJCjlXEgV5OcQdoWj+hq0lKEdnhtCKr03AIfukN6+4vjjarZeW1bs0swq0l3XFf5RHa11otshMS4mpewshB9iO9MuKWpRxuxeng4PlKZ/zuBqmPeUrjJ9454oK35Pq+dghfemt7AUpBH/KycDNIZgfdEWUZrRKBGnc519C+RTqxyt5hWL18nJk4LvSd3QKlJ1iyJxClhhb/NWEzPqNdyA5cxen+2T9bd/EqJ2KzRv5/BPVwTQkHH9W/TZElFyvFfOFIW2+03RKbVGw72Mr/0xKZ+awAnEfoU+SL/2Gj2m6PHkqFX2sOCi/tN9EA4xgdswEwYJKoZIhvcNAQkVMQYEBAEAAAAwXQYJKwYBBAGCNxEBMVAeTgBNAGkAYwByAG8AcwBvAGYAdAAgAFMAdAByAG8AbgBnACAAQwByAHkAcAB0AG8AZwByAGEAcABoAGkAYwAgAFAAcgBvAHYAaQBkAGUAcjBlBgkqhkiG9w0BCRQxWB5WAFAAdgBrAFQAbQBwADoANABjAGUANgAwADQAZABhAC0AMAA2ADgAMQAtADQANAAxADUALQBhADIAYwBhAC0ANQA3ADcAMwAwADgAZQA2AGQAOQBhAGMwggIOBgkqhkiG9w0BBwGgggH/BIIB+zCCAfcwggHzBgsqhkiG9w0BDAoBA6CCAcswggHHBgoqhkiG9w0BCRYBoIIBtwSCAbMwggGvMIIBXaADAgECAhAdka3aTQsIsUphgIXGUmeRMAkGBSsOAwIdBQAwFjEUMBIGA1UEAxMLUm9vdCBBZ2VuY3kwHhcNMTYwMTAxMDcwMDAwWhcNMTgwMTAxMDcwMDAwWjASMRAwDgYDVQQDEwdub2Rlc2RrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC5fhcxbJHxxBEIDzVOMc56s04U6k4GPY7yMR1m+rBGVRiAyV4RjY6U936dqXHCVD36ps2Q0Z+OeEgyCInkIyVeB1EwXcToOcyeS2YcUb0vRWZDouC3tuFdHwiK1Ed5iW/LksmXDotyV7kpqzaPhOFiMtBuMEwNJcPge9k17hRgRQIDAQABo0swSTBHBgNVHQEEQDA+gBAS5AktBh0dTwCNYSHcFmRjoRgwFjEUMBIGA1UEAxMLUm9vdCBBZ2VuY3mCEAY3bACqAGSKEc+41KpcNfQwCQYFKw4DAh0FAANBAHl2M97QbpzdnwO5HoRBsiEExOcLTNg+GKCr7HUsbzfvrUivw+JLL7qjHAIc5phnK+F5bQ8HKe
0L9YXBSKl+fvwxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFGVtyGMqiBd32fGpzlGZQoRM6UQwBBTI0YHFFqTS4Go8CoLgswn29EiuUQICB9A=',
format=models.CertificateFormat.pfx,
password='nodesdk')
certificate = 'SHA1-cff2ab63c8c955aaf71989efa641b906558d9fb7'
response = self.mgmt_batch_client.certificate.create(resource_group.name, batch_account.name, certificate, parameters)
self.assertIsInstance(response.result(), models.Certificate)
# Test List Certificates
certs = self.mgmt_batch_client.certificate.list_by_batch_account(resource_group.name, batch_account.name)
self.assertEqual(len(list(certs)), 1)
# Test Get Certificate
cert = self.mgmt_batch_client.certificate.get(resource_group.name, batch_account.name, certificate)
self.assertIsInstance(cert, models.Certificate)
self.assertEqual(cert.thumbprint.lower(), 'cff2ab63c8c955aaf71989efa641b906558d9fb7')
self.assertEqual(cert.thumbprint_algorithm, 'SHA1')
self.assertIsNone(cert.delete_certificate_error)
# Test Update Certiciate
parameters = models.CertificateCreateOrUpdateParameters(
password='nodesdk',
data='MIIGMQIBAzCCBe0GCSqGSIb3DQEHAaCCBd4EggXaMIIF1jCCA8AGCSqGSIb3DQEHAaCCA7EEggOtMIIDqTCCA6UGCyqGSIb3DQEMCgECoIICtjCCArIwHAYKKoZIhvcNAQwBAzAOBAhyd3xCtln3iQICB9AEggKQhe5P10V9iV1BsDlwWT561Yu2hVq3JT8ae/ebx1ZR/gMApVereDKkS9Zg4vFyssusHebbK5pDpU8vfAqle0TM4m7wGsRj453ZorSPUfMpHvQnAOn+2pEpWdMThU7xvZ6DVpwhDOQk9166z+KnKdHGuJKh4haMT7Rw/6xZ1rsBt2423cwTrQVMQyACrEkianpuujubKltN99qRoFAxhQcnYE2KlYKw7lRcExq6mDSYAyk5xJZ1ZFdLj6MAryZroQit/0g5eyhoNEKwWbi8px5j71pRTf7yjN+deMGQKwbGl+3OgaL1UZ5fCjypbVL60kpIBxLZwIJ7p3jJ+q9pbq9zSdzshPYor5lxyUfXqaso/0/91ayNoBzg4hQGh618PhFI6RMGjwkzhB9xk74iweJ9HQyIHf8yx2RCSI22JuCMitPMWSGvOszhbNx3AEDLuiiAOHg391mprEtKZguOIr9LrJwem/YmcHbwyz5YAbZmiseKPkllfC7dafFfCFEkj6R2oegIsZo0pEKYisAXBqT0g+6/jGwuhlZcBo0f7UIZm88iA3MrJCjlXEgV5OcQdoWj+hq0lKEdnhtCKr03AIfukN6+4vjjarZeW1bs0swq0l3XFf5RHa11otshMS4mpewshB9iO9MuKWpRxuxeng4PlKZ/zuBqmPeUrjJ9454oK35Pq+dghfemt7AUpBH/KycDNIZgfdEWUZrRKBGnc519C+RTqxyt5hWL18nJk4LvSd3QKlJ1iyJxClhhb/NWEzPqNdyA5cxen+2T9bd/EqJ2KzRv5/BPVwTQkHH9W/TZElFyvFfOFIW2+03RKbVGw72Mr/0xKZ+awAnEfoU+SL/2Gj2m6PHkqFX2sOCi/tN9EA4xgdswEwYJKoZIhvcNAQkVMQYEBAEAAAAwXQYJKwYBBAGCNxEBMVAeTgBNAGkAYwByAG8AcwBvAGYAdAAgAFMAdAByAG8AbgBnACAAQwByAHkAcAB0AG8AZwByAGEAcABoAGkAYwAgAFAAcgBvAHYAaQBkAGUAcjBlBgkqhkiG9w0BCRQxWB5WAFAAdgBrAFQAbQBwADoANABjAGUANgAwADQAZABhAC0AMAA2ADgAMQAtADQANAAxADUALQBhADIAYwBhAC0ANQA3ADcAMwAwADgAZQA2AGQAOQBhAGMwggIOBgkqhkiG9w0BBwGgggH/BIIB+zCCAfcwggHzBgsqhkiG9w0BDAoBA6CCAcswggHHBgoqhkiG9w0BCRYBoIIBtwSCAbMwggGvMIIBXaADAgECAhAdka3aTQsIsUphgIXGUmeRMAkGBSsOAwIdBQAwFjEUMBIGA1UEAxMLUm9vdCBBZ2VuY3kwHhcNMTYwMTAxMDcwMDAwWhcNMTgwMTAxMDcwMDAwWjASMRAwDgYDVQQDEwdub2Rlc2RrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC5fhcxbJHxxBEIDzVOMc56s04U6k4GPY7yMR1m+rBGVRiAyV4RjY6U936dqXHCVD36ps2Q0Z+OeEgyCInkIyVeB1EwXcToOcyeS2YcUb0vRWZDouC3tuFdHwiK1Ed5iW/LksmXDotyV7kpqzaPhOFiMtBuMEwNJcPge9k17hRgRQIDAQABo0swSTBHBgNVHQEEQDA+gBAS5AktBh0dTwCNYSHcFmRjoRgwFjEUMBIGA1UEAxMLUm9vdCBBZ2VuY3mCEAY3bACqAGSKEc+41KpcNfQwCQYFKw4DAh0FAANBAHl2M97QbpzdnwO5HoRBsiEExOcLTNg+GKCr7HUsbzfvrUivw+JLL7qjHAIc5phnK+F5bQ8HKe
0L9YXBSKl+fvwxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFGVtyGMqiBd32fGpzlGZQoRM6UQwBBTI0YHFFqTS4Go8CoLgswn29EiuUQICB9A=',)
response = self.mgmt_batch_client.certificate.update(resource_group.name, batch_account.name, certificate, parameters)
self.assertIsInstance(response, models.Certificate)
# Test Cancel Certificate Delete
#with self.assertRaises(models.DeleteCertificateError):
self.mgmt_batch_client.certificate.cancel_deletion(
resource_group.name, batch_account.name, certificate)
# Test Delete Certificate
response = self.mgmt_batch_client.certificate.delete(resource_group.name, batch_account.name, certificate)
self.assertIsNone(response.result())
@ResourceGroupPreparer(location=AZURE_LOCATION)
@SimpleBatchPreparer(location=AZURE_LOCATION)
def test_mgmt_batch_pools(self, resource_group, location, batch_account):
# Test create PAAS pool
paas_pool = "test_paas_pool"
parameters = models.Pool(
display_name="test_pool",
vm_size='small',
deployment_configuration=models.DeploymentConfiguration(
cloud_service_configuration=models.CloudServiceConfiguration(os_family='5')
),
start_task=models.StartTask(
command_line="cmd.exe /c \"echo hello world\"",
resource_files=[models.ResourceFile(http_url='https://blobsource.com', file_path='filename.txt')],
environment_settings=[models.EnvironmentSetting('ENV_VAR', 'env_value')],
user_identity=models.UserIdentity(
auto_user=models.AutoUserSpecification(
elevation_level=models.ElevationLevel.admin
)
)
),
user_accounts=[models.UserAccount('UserName', 'p@55wOrd')],
scale_settings=models.ScaleSettings(
fixed_scale=models.FixedScaleSettings(
target_dedicated_nodes=0,
target_low_priority_nodes=0
)
)
)
response = self.mgmt_batch_client.pool.create(
resource_group.name, batch_account.name, paas_pool, parameters)
self.assertIsInstance(response.result(), models.Pool)
# Test create IAAS pool
iaas_pool = "test_iaas_pool"
parameters = models.Pool(
display_name="test_pool",
vm_size='Standard_A1',
deployment_configuration=models.DeploymentConfiguration(
virtual_machine_configuration=models.VirtualMachineConfiguration(
image_reference=models.ImageReference(
publisher='MicrosoftWindowsServer',
offer='WindowsServer',
sku='2016-Datacenter-smalldisk'
),
node_agent_sku_id='batch.node.windows amd64',
windows_configuration=models.WindowsConfiguration(True)
)
),
scale_settings=models.ScaleSettings(
fixed_scale=models.FixedScaleSettings(
target_dedicated_nodes=0,
target_low_priority_nodes=0
)
)
)
response = self.mgmt_batch_client.pool.create(
resource_group.name, batch_account.name, iaas_pool, parameters)
self.assertIsInstance(response.result(), models.Pool)
# Test list pools
pools = self.mgmt_batch_client.pool.list_by_batch_account(resource_group.name, batch_account.name)
self.assertEqual(len(list(pools)), 2)
# Test Update pool
parameters = models.Pool(
scale_settings=models.ScaleSettings(
auto_scale=models.AutoScaleSettings(
formula='$TargetDedicatedNodes=1'
)
)
)
response = self.mgmt_batch_client.pool.update(
resource_group.name, batch_account.name, iaas_pool, parameters)
self.assertIsInstance(response, models.Pool)
# Test Get pool
pool = self.mgmt_batch_client.pool.get(
resource_group.name, batch_account.name, iaas_pool)
self.assertIsInstance(pool, models.Pool)
self.assertEqual(pool.vm_size, 'STANDARD_A1'),
self.assertIsNone(pool.display_name),
self.assertEqual(pool.allocation_state, models.AllocationState.resizing)
self.assertEqual(
pool.deployment_configuration.virtual_machine_configuration.node_agent_sku_id,
'batch.node.windows amd64')
# Test stop resizing
with self.assertRaises(CloudError):
self.mgmt_batch_client.pool.stop_resize(resource_group.name, batch_account.name, iaas_pool)
if self.is_live:
time.sleep(300)
# Test disable auto-scale
response = self.mgmt_batch_client.pool.disable_auto_scale(
resource_group.name, batch_account.name, iaas_pool)
self.assertIsInstance(response, models.Pool)
# Test delete pool
response = self.mgmt_batch_client.pool.delete(
resource_group.name, batch_account.name, iaas_pool)
self.assertIsNone(response.result()) | account_name = self._get_account_name()
account_setup = self.mgmt_batch_client.batch_account.create( |
test_imageutil.py | """
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import pytest
import tarfile
import io
import os
from flexmock import flexmock
from pathlib import Path
from osbs.utils import ImageName
from atomic_reactor import config
from atomic_reactor import util
from atomic_reactor.utils import imageutil, retries
@pytest.fixture
def df_images():
"""DockerfileImages instance for testing."""
return util.DockerfileImages(["registry.com/fedora:35"])
@pytest.mark.parametrize(
"image, is_inspectable",
[
("scratch", False),
("koji/image-build", False),
("registry.com/foo/bar", True),
# does not work, nobody should ever try to use scratch as an ImageName
# (ImageName.parse("scratch"), False),
(ImageName.parse("koji/image-build"), False),
(ImageName.parse("registry.com/foo/bar"), True),
],
)
def test_inspectable(image, is_inspectable):
assert imageutil.image_is_inspectable(image) == is_inspectable
def mock_tarball(tarball_path, files):
with tarfile.open(tarball_path, 'w:gz') as tf:
for filename, file_data in files.items():
file = tarfile.TarInfo(filename)
file.size = file_data['size']
if file_data['content']:
tf.addfile(file, io.BytesIO(file_data['content']))
else:
tf.addfile(file, io.BytesIO(os.urandom(file.size)))
class TestImageUtil:
"""Tests for the ImageUtil class."""
config = config.Configuration(
raw_config={
"version": 1,
# "registries": [], # relevant to RegistrySession, not directly relevant to ImageUtil
"platform_descriptors": [{"platform": "x86_64", "architecture": "amd64"}],
},
)
inspect_data = {"some": "inspect data as returned by RegistryClient.get_inspect_for_image"}
def mock_get_registry_client(self, expect_image, expect_arch):
"""Make the _get_registry_client method return a fake RegistryClient."""
registry_client = flexmock()
(
registry_client
.should_receive("get_inspect_for_image")
.with_args(expect_image, expect_arch)
.once()
.and_return(self.inspect_data)
)
(
flexmock(imageutil.ImageUtil)
.should_receive("_get_registry_client")
.with_args(expect_image.registry)
.once()
.and_return(registry_client)
)
return registry_client
def test_get_inspect_for_image(self, df_images):
"""Test get_inspect_for_image and its caching behavior."""
image_util = imageutil.ImageUtil(df_images, self.config)
image = ImageName.parse("registry.com/some-image:1")
self.mock_get_registry_client(image, expect_arch=None)
assert image_util.get_inspect_for_image(image) == self.inspect_data
# check caching (the registry client mock expects its method to be called exactly once,
# if imageutil didn't cache the result, it would get called twice)
assert image_util.get_inspect_for_image(image) == self.inspect_data
image_as_str = image.to_str()
# should hit cache regardless of whether you pass a string or an ImageName
assert image_util.get_inspect_for_image(image_as_str) == self.inspect_data
@pytest.mark.parametrize(
"platform, expect_goarch",
[
("x86_64", "amd64"), # platform is mapped to goarch
("s390x", "s390x"), # platform is not mapped (goarch name is the same)
("amd64", "amd64"), # pass goarch directly
],
)
def test_get_inspect_for_image_specific_platform(self, platform, expect_goarch, df_images):
"""Test that get_inspect_for_image handles the platform to goarch mapping properly."""
image_util = imageutil.ImageUtil(df_images, self.config)
image = ImageName.parse("registry.com/some-image:1")
# main check: expect_arch
self.mock_get_registry_client(image, expect_arch=expect_goarch)
assert image_util.get_inspect_for_image(image, platform) == self.inspect_data
# should hit cache regardless of whether you pass a platform or a goarch
assert image_util.get_inspect_for_image(image, expect_goarch) == self.inspect_data
def test_get_inspect_for_image_not_inspectable(self, df_images):
"""Test that passing a non-inspectable image raises an error."""
image_util = imageutil.ImageUtil(df_images, self.config)
custom_image = ImageName.parse("koji/image-build")
with pytest.raises(ValueError, match=r"ImageName\(.*\) is not inspectable"):
image_util.get_inspect_for_image(custom_image)
@pytest.mark.parametrize("platform", [None, "x86_64"])
def test_base_image_inspect(self, platform, df_images):
"""Test that base_image_inspect just calls get_inspect_for_image with the right args."""
image_util = imageutil.ImageUtil(df_images, self.config)
(
flexmock(image_util)
.should_receive("get_inspect_for_image")
# base image in df_images
.with_args(ImageName.parse("registry.com/fedora:35"), platform)
.once()
.and_return(self.inspect_data)
)
assert image_util.base_image_inspect(platform) == self.inspect_data
@pytest.mark.parametrize("base_image", ["scratch", "koji/image-build"])
def test_base_image_inspect_not_inspectable(self, base_image):
"""Test that inspecting a non-inspectable base image returns an empty dict."""
image_util = imageutil.ImageUtil(util.DockerfileImages([base_image]), self.config)
assert image_util.base_image_inspect() == {}
def test_get_registry_client(self):
"""Test the method that makes a RegistryClient (other tests mock this method)."""
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
registry_session = flexmock()
(
flexmock(util.RegistrySession)
.should_receive("create_from_config")
.with_args(self.config, "registry.com")
.once()
.and_return(registry_session)
)
flexmock(util.RegistryClient).should_receive("__init__").with_args(registry_session).once()
image_util._get_registry_client("registry.com")
# test caching (i.e. test that the create_from_config method is called only once)
image_util._get_registry_client("registry.com")
def test_extract_file_from_image_non_empty_dst_dir(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
image = 'registry.com/fedora:35'
src_path = '/path/to/file'
dst_path = Path(tmpdir) / 'dst_dir'
dst_path.mkdir()
file = dst_path / 'somefile.txt'
file.touch()
with pytest.raises(ValueError, match=f'the destination directory {dst_path} must be empty'):
image_util.extract_file_from_image(image=image, src_path=src_path, dst_path=dst_path)
def test_extract_file_from_image_no_file_extracted(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
image = 'registry.com/fedora:35'
src_path = '/path/to/file'
dst_path = Path(tmpdir) / 'dst_dir'
dst_path.mkdir()
(
flexmock(retries)
.should_receive("run_cmd")
.with_args(['oc', 'image', 'extract', image, '--path', f'{src_path}:{dst_path}'])
.once()
)
with pytest.raises(
ValueError,
match=f"Extraction failed, files at path {src_path} not found in the image",
):
image_util.extract_file_from_image(
image=image, src_path=src_path, dst_path=dst_path
)
def test_extract_file_from_image(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
image = 'registry.com/fedora:35'
src_path = '/path/to/file'
dst_path = Path(tmpdir) / 'dst_dir'
dst_path.mkdir()
# mock the functionality of oc image extract
# just creates a file in dst_path
def mock_extract_file(cmd):
file = dst_path / 'somefile.txt'
file.touch()
(
flexmock(retries)
.should_receive("run_cmd")
.with_args(['oc', 'image', 'extract', image, '--path', f'{src_path}:{dst_path}'])
.replace_with(mock_extract_file).once()
)
image_util.extract_file_from_image(image=image, src_path=src_path, dst_path=dst_path)
def test_download_image_archive_tarball(self):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
image = 'registry.com/fedora:35'
path = '/tmp/path'
(
flexmock(retries)
.should_receive("run_cmd")
.with_args(['skopeo', 'copy', f'docker://{image}', f'docker-archive:{path}'])
.once()
)
image_util.download_image_archive_tarball(image=image, path=path)
def test_get_uncompressed_image_layer_sizes(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
path = Path(tmpdir) / 'tarball.tar'
manifest_file_content = (
'[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
'"RepoTags":[],'
'"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
'"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
'"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}]'
).encode('utf-8')
config_file_content = (
'{"rootfs": {"type": "layers", "diff_ids": '
'["sha256:92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0", '
'"sha256:eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0", '
'"sha256:6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8", '
'"sha256:07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b"]}}'
).encode("utf-8")
mock_files = {
"92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar": {
"content": None,
"size": 1,
},
"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar": {
"content": None,
"size": 2,
},
"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar": {
"content": None,
"size": 3,
},
"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar": {
"content": None,
"size": 4,
},
"manifest.json": { | },
"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json": {
"content": config_file_content,
"size": len(config_file_content),
},
}
mock_tarball(tarball_path=path, files=mock_files)
actual_data = image_util.get_uncompressed_image_layer_sizes(path=path)
expected_data = [
{
"diff_id": "sha256:92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0", # noqa
"size": 1,
},
{
"diff_id": "sha256:eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0", # noqa
"size": 2,
},
{
"diff_id": "sha256:6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8", # noqa
"size": 3,
},
{
"diff_id": "sha256:07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b", # noqa
"size": 4,
},
]
assert actual_data == expected_data
def test_get_uncompressed_image_layer_sizes_multiple_entries_in_manifest_json(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
path = Path(tmpdir) / 'tarball.tar'
manifest_file_content = (
'[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
'"RepoTags":[],'
'"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
'"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
'"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}, '
'{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad711.json"'
', "RepoTags": [], '
'"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6e.tar"]}]'
).encode('utf-8')
mock_files = {
"manifest.json": {
"content": manifest_file_content,
"size": len(manifest_file_content),
},
}
mock_tarball(tarball_path=path, files=mock_files)
with pytest.raises(
ValueError, match="manifest.json file has multiple entries, expected only one"
):
image_util.get_uncompressed_image_layer_sizes(path=path)
def test_extract_filesystem_layer(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
src_path = Path(tmpdir) / 'tarball.tar'
dst_path = Path(tmpdir) / 'dst'
expected_layer_filename = 'd31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar' # noqa
manifest_file_content = (
'[{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710.json"'
', "RepoTags": [], '
'"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar"]}]'
).encode('utf-8')
mocked_files = {
'manifest.json': {'content': manifest_file_content, 'size': len(manifest_file_content)},
expected_layer_filename: {'content': None, 'size': 1}
}
mock_tarball(tarball_path=src_path, files=mocked_files)
actual_layer_filename = image_util.extract_filesystem_layer(src_path, dst_path)
assert actual_layer_filename == expected_layer_filename
assert (dst_path / expected_layer_filename).exists()
def test_extract_filesystem_layer_more_than_one_layer_fail(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
src_path = Path(tmpdir) / 'tarball.tar'
dst_path = Path(tmpdir) / 'dst'
manifest_file_content = (
'[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
'"RepoTags":[],'
'"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
'"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
'"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}]'
).encode('utf-8')
mocked_files = {
"92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar": {
"content": None,
"size": 1,
},
"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar": {
"content": None,
"size": 2,
},
"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar": {
"content": None,
"size": 3,
},
"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar": {
"content": None,
"size": 4,
},
"manifest.json": {
"content": manifest_file_content,
"size": len(manifest_file_content),
},
}
mock_tarball(tarball_path=src_path, files=mocked_files)
with pytest.raises(ValueError, match=f'Tarball at {src_path} has more than 1 layer'):
image_util.extract_filesystem_layer(src_path, dst_path)
def test_extract_filesystem_layer_multiple_entries_in_manifest_json(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
src_path = Path(tmpdir) / 'tarball.tar'
dst_path = Path(tmpdir) / 'dst'
expected_layer_filename = 'd31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar' # noqa
manifest_file_content = (
'[{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710.json"'
', "RepoTags": [], '
'"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar"]},'
'{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad711.json"'
', "RepoTags": [], '
'"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6e.tar"]}]'
).encode("utf-8")
mocked_files = {
'manifest.json': {'content': manifest_file_content, 'size': len(manifest_file_content)},
expected_layer_filename: {'content': None, 'size': 1}
}
mock_tarball(tarball_path=src_path, files=mocked_files)
with pytest.raises(
ValueError, match="manifest.json file has multiple entries, expected only one"
):
image_util.extract_filesystem_layer(src_path, dst_path) | "content": manifest_file_content,
"size": len(manifest_file_content), |
file_darwin.go | // +build !ios
package dialog
import (
"os"
"fyne.io/fyne/v2"
"fyne.io/fyne/v2/storage"
)
func getFavoriteLocations() (map[string]fyne.ListableURI, error) {
homeDir, err := os.UserHomeDir()
if err != nil {
return nil, err
}
homeURI := storage.NewFileURI(homeDir)
favoriteNames := append(getFavoriteOrder(), "Home")
favoriteLocations := make(map[string]fyne.ListableURI)
for _, favName := range favoriteNames {
var uri fyne.URI
var err1 error
if favName == "Home" {
uri = homeURI
} else {
uri, err1 = storage.Child(homeURI, favName)
}
if err1 != nil |
listURI, err1 := storage.ListerForURI(uri)
if err1 != nil {
err = err1
continue
}
favoriteLocations[favName] = listURI
}
return favoriteLocations, err
}
| {
err = err1
continue
} |
worker_pool_test.go | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency_test
import (
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/uber/makisu/lib/concurrency"
)
func TestWorkerPool(t *testing.T) |
func TestWorkerPoolStop(t *testing.T) {
require := require.New(t)
pool := concurrency.NewWorkerPool(5)
count := int32(0)
for i := 0; i < 5; i++ {
pool.Do(func() {
time.Sleep(1 * time.Millisecond)
atomic.AddInt32(&count, 1)
})
}
var wg sync.WaitGroup
wg.Add(1)
pool.Do(func() {
defer wg.Done()
atomic.AddInt32(&count, 1)
pool.Stop()
})
wg.Wait()
// Some future tasks will be executed after stop is called.
for i := 6; i < 100; i++ {
pool.Do(func() {
time.Sleep(1 * time.Millisecond)
atomic.AddInt32(&count, 1)
})
}
pool.Wait()
require.True(count >= 6)
}
| {
require := require.New(t)
pool := concurrency.NewWorkerPool(100)
count := int32(0)
for i := 0; i < 100; i++ {
pool.Do(func() {
time.Sleep(1 * time.Millisecond)
atomic.AddInt32(&count, 1)
})
}
pool.Wait()
require.Equal(int32(100), count)
} |
encoding.rs | //! This module provides the central representation of an RNA sequence enabling computation
//! of an autocorrelation using FFT-based convolution.
//! Nucleotides of a sequence are encoded as tuples:
//!
//! - `A = (1, 0, 0, 0)`
//! - `C = (0, 1, 0, 0)`
//! - `G = (0, 0, 1, 0)`
//! - `U = (0, 0, 0, 1)`
//!
//! Additionally, a _mirrored_ copy of the sequence is encoded in reverse using a complementary (in a sense)
//! alphabet, effectively carrying information about the strength of legal base pairs:
//!
//! - `a = (0, 0, 0, AU)`
//! - `c = (0, 0, GC, 0)`
//! - `g = (0, GC, 0, GU)`
//! - `u = (AU, 0, GU, 0)`
//!
//! where `AU`, `GC`, `GU` are weights of the base pairs.
use ndarray::{arr1, s, Array1, Array2, ArrayView1, Axis};
use std::convert::TryInto;
use thiserror::Error;
/// Error type representing errors that may arise during sequence parsing or encoding.
#[derive(Error, Debug)]
pub enum Error {
/// Error variant corresponding to invalid nucleotides in the supplied sequence string.
#[error("invalid nucleotide (expected one of [A, C, G, U], found {0:?})")]
InvalidNucleotide(char),
}
// emulating an enum with array variants
#[allow(non_snake_case)]
mod Alphabet {
pub(crate) const A: [f64; 4] = [1.0, 0.0, 0.0, 0.0];
pub(crate) const C: [f64; 4] = [0.0, 1.0, 0.0, 0.0];
pub(crate) const G: [f64; 4] = [0.0, 0.0, 1.0, 0.0];
pub(crate) const U: [f64; 4] = [0.0, 0.0, 0.0, 1.0];
}
/// See the [module-level description](crate::encoding).
#[allow(missing_docs)]
#[allow(non_snake_case)]
pub struct BasePairWeights {
pub AU: f64,
pub GC: f64,
pub GU: f64,
}
#[allow(non_snake_case)]
struct MirrorAlphabet {
A: Array1<f64>,
C: Array1<f64>,
G: Array1<f64>,
U: Array1<f64>,
}
impl MirrorAlphabet {
pub fn new(weights: &BasePairWeights) -> Self {
Self {
A: arr1(&[0.0, 0.0, 0.0, weights.AU]),
C: arr1(&[0.0, 0.0, weights.GC, 0.0]),
G: arr1(&[0.0, weights.GC, 0.0, weights.GU]),
U: arr1(&[weights.AU, 0.0, weights.GU, 0.0]),
}
}
}
impl Default for MirrorAlphabet {
fn default() -> Self {
Self {
A: arr1(&[0.0, 0.0, 0.0, 1.0]),
C: arr1(&[0.0, 0.0, 1.0, 0.0]),
G: arr1(&[0.0, 1.0, 0.0, 1.0]),
U: arr1(&[1.0, 0.0, 1.0, 0.0]),
}
}
}
/// An [`EncodedSequence`] consists of a _forward_ encoding and a _mirrored_ encoding.
/// See the [module-level description](crate::encoding) for details.
#[derive(Debug, Clone)]
pub struct EncodedSequence {
pub(crate) forward: Array2<f64>,
pub(crate) mirrored: Array2<f64>,
//subsequences will carry information about the positions of their parent sequence
pub(crate) parent_indices: Array1<usize>,
}
impl EncodedSequence {
/// Encode an RNA sequence with given [`BasePairWeights`] being stored in the mirrored encoded sequence.
pub fn with_basepair_weights(sequence: &str, weights: &BasePairWeights) -> Result<Self, Error> {
let mirrored_alphabet = MirrorAlphabet::new(weights);
let length = sequence.len();
let mut forward = Array2::default((4, length));
let mut mirrored = Array2::default((4, length));
// 1-indexed for convenience
let parent_indices = Array1::from_iter(1..=length);
match sequence.chars().enumerate().try_for_each(|(i, c)| match c {
'A' => {
forward
.column_mut(i)
.zip_mut_with(&arr1(&Alphabet::A), |ci, ni| *ci = *ni);
mirrored
.column_mut(i)
.zip_mut_with(&mirrored_alphabet.A.view(), |ci, ni| *ci = *ni);
Ok(())
}
'C' => {
forward
.column_mut(i)
.zip_mut_with(&arr1(&Alphabet::C), |ci, ni| *ci = *ni);
mirrored
.column_mut(i)
.zip_mut_with(&mirrored_alphabet.C.view(), |ci, ni| *ci = *ni);
Ok(())
}
'G' => {
forward
.column_mut(i)
.zip_mut_with(&arr1(&Alphabet::G), |ci, ni| *ci = *ni);
mirrored
.column_mut(i)
.zip_mut_with(&mirrored_alphabet.G.view(), |ci, ni| *ci = *ni);
Ok(())
}
'U' => {
forward
.column_mut(i)
.zip_mut_with(&arr1(&Alphabet::U), |ci, ni| *ci = *ni);
mirrored
.column_mut(i)
.zip_mut_with(&mirrored_alphabet.U.view(), |ci, ni| *ci = *ni);
Ok(())
}
_ => Err(Error::InvalidNucleotide(c)),
}) {
Err(e) => Err(e),
_ => Ok(Self {
forward,
mirrored,
parent_indices,
}),
}
}
/// Encode an RNA sequence with equal [`BasePairWeights`].
pub fn new(sequence: &str) -> Result<Self, Error> {
Self::with_basepair_weights(
sequence,
&BasePairWeights {
AU: 1.0,
GC: 1.0,
GU: 1.0,
},
)
}
/// Return the length of the encoded sequence.
pub fn len(&self) -> usize {
self.forward.len_of(Axis(1))
}
/// Return whether the encoded sequence is empty.
pub fn is_empty(&self) -> bool {
self.forward.is_empty()
}
/// Create a subsequence from an `EncodedSequence`.
/// Currently, this allocates new memory instead of slicing or some copy-on-write behaviour.
/// The range defined by `start` and `end` is exclusive.
/// If `start >= end`, a contiguous [`EncodedSequence`] is newly created, omitting the inner
/// region enclosed by both parameters.
pub fn subsequence(&self, start: usize, end: usize) -> Self {
if start < end {
let sub_fwd = self.forward.slice(s![.., start..end]);
let sub_mrrd = self.mirrored.slice(s![.., start..end]);
let sub_indices = self.parent_indices.slice(s![start..end]);
Self {
forward: sub_fwd.to_owned(),
mirrored: sub_mrrd.to_owned(),
parent_indices: sub_indices.to_owned(),
}
} else {
// let indices: Vec<usize> = (0..end).chain(start..self.len())
// should work as well since it does not change pairing
// in which case `end` should be stored as concatenation site
let indices: Vec<usize> = (0..end).chain(start..self.len()).collect();
//let indices: Vec<usize> = (start..self.len()).chain(0..end).collect();
// double-select to force C standard layout
// this is hacky and not as efficient as possible but should suffice for now
let sub_fwd = self
.forward
.select(Axis(1), &indices)
.select(Axis(0), &[0, 1, 2, 3]);
let sub_mrrd = self
.mirrored
.select(Axis(1), &indices)
.select(Axis(0), &[0, 1, 2, 3]);
let sub_indices = self.parent_indices.select(Axis(0), &indices);
Self {
forward: sub_fwd,
mirrored: sub_mrrd,
parent_indices: sub_indices,
}
}
}
}
impl EncodedSequence {
/// Search for the longest sequence of consecutive pairs of the encoded sequence and its (reversed) mirror
/// offset-aligned by `positional_lag` using a sliding-window approach.
///
/// Sequences of consecutive pairs are prohibited from spanning over concatenation sites.
/// This may the case if `self` was constructed as a subsequence.
///
/// `minimal_hairpin` is the number of unpaired positions enclosed by a stack of consecutive pairs.
/// A sane default value is `3`.
///
/// Returns a quadruple containing the number of pairs in the sequence,
/// the first paired positions of both strands, and a score based on the underlying [`BasePairWeights`]
pub fn consecutive_pairs_at_lag(
&self,
positional_lag: usize,
minimal_hairpin: usize,
) -> (usize, usize, usize, f64) {
// Slicing this way since self.mirrored is stored in the same direction as self.forward
let (fwd_sliceinfo, mrrd_sliceinfo) = if positional_lag < self.len() {
(s![.., ..=positional_lag], s![.., ..=positional_lag;-1])
} else {
(
s![.., positional_lag - self.len() + 1..],
s![.., positional_lag - self.len() + 1..;-1],
)
};
let fwd_slice = self.forward.slice(fwd_sliceinfo);
let mrrd_slice = self.mirrored.slice(mrrd_sliceinfo);
// Slide over half of the offset-aligned sequences since they are complementary
let halved_length = fwd_slice.len_of(Axis(1)) / 2 + fwd_slice.len_of(Axis(1)) % 2;
// The total pairing score per position is computed as the pairwise product
// of the offset-aligned sequences (actually, only their first halves)
// and then summed over all four nucleotides.
let mut total_pairing_scores = (fwd_slice.slice(s![.., ..halved_length]).to_owned()
* mrrd_slice.slice(s![.., ..halved_length]))
.sum_axis(Axis(0));
// not very idiomatic but I'm trying to stay close to the reference implementation
// the essential functionality could be done simpler but I want to reproduce intermediate results
let mut i = 0;
let mut max_i = 0;
let mut max_score = 0.0;
let mut acc_pairs = if total_pairing_scores[0] == 0.0 { 0 } else { 1 };
let mut max_pairs = 0;
let (mut max_lower, mut max_upper) = if positional_lag < self.len() {
(0, positional_lag)
} else {
(positional_lag - self.len() + 1, self.len() - 1)
};
if total_pairing_scores[0] >= 0.0
&& self.parent_indices[max_upper] - self.parent_indices[max_lower] > minimal_hairpin
{
max_score = total_pairing_scores[0];
max_pairs = acc_pairs;
} else {
max_lower = 0;
max_upper = 0;
}
let accumulate_scores = |&prev: &f64, curr: &mut f64| {
i += 1;
let (lower_position, upper_position) = if positional_lag < self.len() {
(i, positional_lag - i)
} else {
(positional_lag - self.len() + 1 + i, self.len() - i - 1)
};
if self.parent_indices[lower_position] - self.parent_indices[lower_position - 1] == 1
&& self.parent_indices[upper_position + 1] - self.parent_indices[upper_position]
== 1
{
*curr *= prev + *curr;
}
if *curr > 0.0 {
acc_pairs += 1;
} else {
acc_pairs = 0;
}
let distance =
self.parent_indices[upper_position] - self.parent_indices[lower_position];
if *curr >= max_score
// check if there are at least 3 unpaired positions between paired positions of stack
&& distance > minimal_hairpin
{
max_score = *curr;
max_i = i;
max_upper = upper_position;
max_lower = lower_position;
max_pairs = acc_pairs;
}
};
total_pairing_scores.accumulate_axis_inplace(Axis(0), accumulate_scores);
(max_pairs, max_lower, max_upper, max_score)
}
}
/// A wrapper type for pair tables in `ViennaRNA`.
/// This struct stores `i16` internally and is `1`-indexed.
///
/// Refer to the [upstream API](https://www.tbi.univie.ac.at/RNA/ViennaRNA/doc/html/group__struct__utils__pair__table.html) for details.
// Why is Array1<i16> not Copy?
#[derive(Clone, PartialEq, Eq)]
pub struct PairTable(Array1<i16>);
impl PairTable {
/// Create a new [`PairTable`].
pub fn new(length: usize) -> Self {
let mut inner = Array1::zeros(length + 1);
inner[0] = length.try_into().unwrap();
PairTable(inner)
}
/// Return the `length` of the represented structure.
/// The internal representation has `length + 1` elements for compatibility with ViennaRNA.
pub fn len(&self) -> usize {
self.0[0] as usize
}
/// Return whether the represented structure is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Return an iterator over all unpaired positions (`1`-indexed).
pub fn unpaired(&self) -> impl Iterator<Item = usize> + '_ {
self.0
.indexed_iter()
.filter(|(_, &u)| u == 0)
.map(|(i, _)| i as usize)
}
/// Return an iterater over all ordered tuples of paired positions (`1-indexed`).
pub fn paired(&self) -> impl Iterator<Item = (usize, usize)> + '_ {
self.0
.indexed_iter()
.skip(1)
.filter(|(i, &u)| i < &(u as usize))
.map(|(i, &u)| (i, u as usize))
}
/// Return the number of pairs in the `PairTable`.
pub fn pairs(&self) -> usize |
/// Insert a new pair into the [`PairTable`].
/// Does not check for crossing pairs.
/// Panics if supplied positions are out of range or inserting `i` or `j` would conflict with other pairs.
/// However, re-inserting pairs is accepted, albeit not a no-op.
pub fn insert(&mut self, i: i16, j: i16) {
assert!(0 < i && i <= self.len().try_into().unwrap());
assert!(0 < j && j <= self.len().try_into().unwrap());
assert_ne!(i, j);
//assert_eq!(self.0[i as usize], 0);
//assert_eq!(self.0[j as usize], 0);
assert!(self.0[i as usize] == 0 || self.0[i as usize] == j);
assert!(self.0[j as usize] == 0 || self.0[j as usize] == i);
self.0[i as usize] = j;
self.0[j as usize] = i;
}
/// Return a view of the inner array.
pub fn view(&self) -> ArrayView1<i16> {
self.0.view()
}
}
impl ToString for PairTable {
/// Return the dot-bracket notation of the PairTable.
fn to_string(&self) -> String {
self.0
.indexed_iter()
.skip(1)
.map(|(i, &j)| {
if j == 0 {
'.'
} else if i < j as usize {
'('
} else {
')'
}
})
.collect::<String>()
}
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::Array;
#[test]
fn test_encoding() {
let sequence =
"GGGUUUGCGGUGUAAGUGCAGCCCGUCUUACACCGUGCGGCACAGGCACUAGUACUGAUGUCGUAUACAGGGCUUUUGACAU";
let bpw = BasePairWeights {
AU: 2.0,
GC: 3.0,
GU: 1.0,
};
let encoded = EncodedSequence::with_basepair_weights(sequence, &bpw).unwrap();
let fwd = Array::from_vec(vec![
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 1.,
0., 0., 0., 1., 0., 0., 1., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.,
1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0.,
0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 1., 1., 0., 0., 1., 0.,
0., 0., 1., 0., 1., 1., 0., 0., 0., 1., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 1., 0.,
0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.,
1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 1., 1., 0., 0., 0., 1., 0., 1., 1., 0., 1.,
0., 0., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
1., 0., 1., 0., 1., 1., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.,
1., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 0., 1.,
0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 1., 0., 0., 1., 0., 1., 0.,
0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 0., 0., 0., 0., 1.,
])
.into_shape((4, 82))
.unwrap();
/*let mrrd = Array::from_vec(vec![
2., 0., 0., 0., 0., 2., 2., 2., 2., 0., 0., 0., 0., 0., 0., 0., 2., 0., 2., 0., 0., 2.,
0., 2., 0., 0., 2., 0., 0., 2., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 2., 0., 0., 0., 0., 0., 0., 2., 2., 0., 2., 0., 0., 0., 0., 0., 0., 0., 0., 2.,
0., 0., 0., 2., 0., 2., 0., 0., 0., 0., 2., 2., 2., 0., 0., 0., 0., 0., 0., 0., 3., 0.,
0., 0., 0., 0., 3., 3., 3., 0., 0., 0., 0., 0., 0., 3., 0., 0., 3., 0., 0., 3., 0., 0.,
0., 0., 3., 0., 0., 0., 0., 0., 3., 3., 0., 0., 0., 0., 3., 3., 0., 3., 0., 3., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 3., 0., 0., 0., 3., 0., 0., 3., 0., 3., 0., 0., 0., 3., 0.,
3., 3., 0., 3., 0., 0., 0., 3., 3., 3., 1., 0., 3., 0., 0., 1., 1., 1., 1., 3., 0., 0.,
0., 0., 3., 0., 1., 0., 1., 0., 3., 1., 0., 1., 0., 0., 1., 3., 0., 1., 0., 0., 1., 3.,
0., 3., 0., 0., 0., 3., 0., 3., 0., 0., 3., 0., 1., 0., 3., 3., 0., 3., 0., 1., 1., 3.,
1., 0., 3., 3., 3., 0., 0., 3., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 3., 0., 1., 1.,
1., 0., 0., 0., 0., 2., 0., 2., 1., 0., 0., 0., 0., 0., 1., 1., 1., 2., 0., 2., 0., 2.,
0., 1., 0., 0., 1., 0., 2., 1., 0., 0., 2., 0., 1., 2., 0., 0., 2., 0., 1., 1., 2., 0.,
2., 0., 1., 1., 0., 1., 0., 1., 0., 0., 2., 0., 2., 0., 0., 0., 0., 1., 0., 0., 0., 1.,
2., 0., 1., 0., 1., 2., 2., 0., 1., 0., 1., 1., 0., 1., 0., 0., 0., 1., 1., 1.,
])
.into_shape((4, 82))
.unwrap();*/
let mrrd = Array::from_vec(vec![
0., 0., 0., 2., 2., 2., 0., 0., 0., 0., 2., 0., 2., 0., 0., 0., 2., 0., 0., 0., 0., 0.,
0., 0., 0., 2., 0., 2., 2., 0., 0., 0., 0., 0., 0., 2., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 2., 0., 0., 2., 0., 0., 2., 0., 0., 2., 0., 2., 0., 0., 2., 0., 2.,
0., 0., 0., 0., 0., 0., 0., 2., 2., 2., 2., 0., 0., 0., 0., 2., 3., 3., 3., 0., 0., 0.,
3., 0., 3., 3., 0., 3., 0., 0., 0., 3., 0., 3., 0., 0., 3., 0., 0., 0., 3., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 3., 0., 3., 0., 3., 3., 0., 0., 0., 0., 3., 3., 0., 0., 0., 0.,
0., 3., 0., 0., 0., 0., 3., 0., 0., 3., 0., 0., 3., 0., 0., 0., 0., 0., 0., 3., 3., 3.,
0., 0., 0., 0., 0., 3., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 0., 3., 0., 0., 1., 0.,
1., 0., 0., 0., 1., 0., 3., 0., 0., 3., 3., 3., 0., 1., 3., 1., 1., 0., 3., 0., 3., 3.,
0., 1., 0., 3., 0., 0., 3., 0., 3., 0., 0., 0., 3., 0., 3., 1., 0., 0., 1., 0., 3., 1.,
0., 0., 1., 0., 1., 3., 0., 1., 0., 1., 0., 3., 0., 0., 0., 0., 3., 1., 1., 1., 1., 0.,
0., 3., 0., 1., 1., 1., 1., 0., 0., 0., 1., 0., 1., 1., 0., 1., 0., 2., 2., 1., 0., 1.,
0., 2., 1., 0., 0., 0., 1., 0., 0., 0., 0., 2., 0., 2., 0., 0., 1., 0., 1., 0., 1., 1.,
0., 2., 0., 2., 1., 1., 0., 2., 0., 0., 2., 1., 0., 2., 0., 0., 1., 2., 0., 1., 0., 0.,
1., 0., 2., 0., 2., 0., 2., 1., 1., 1., 0., 0., 0., 0., 0., 1., 2., 0., 2., 0.,
])
.into_shape((4, 82))
.unwrap();
assert_eq!(encoded.forward, fwd);
assert_eq!(encoded.mirrored, mrrd);
}
#[test]
fn test_subsequence() {
let sequence =
"GGGUUUGCGGUGUAAGUGCAGCCCGUCUUACACCGUGCGGCACAGGCACUAGUACUGAUGUCGUAUACAGGGCUUUUGACAU";
let bpw = BasePairWeights {
AU: 2.0,
GC: 3.0,
GU: 1.0,
};
let encoded = EncodedSequence::with_basepair_weights(sequence, &bpw).unwrap();
let sub = encoded.subsequence(0, 5);
assert_eq!(
sub.forward,
encoded.forward.slice(s![.., 0..5]) //CowArray::from(encoded.forward.slice(s![.., 0..5]))
);
assert_eq!(
sub.mirrored,
encoded.mirrored.slice(s![.., 0..5]) //CowArray::from(encoded.mirrored.slice(s![.., 0..5]))
);
//let oligo = "AUGGG";
let oligo = "GGGAU";
let encoded_oligo = EncodedSequence::with_basepair_weights(oligo, &bpw).unwrap();
let concat_oligo = encoded.subsequence(80, 3);
assert_eq!(concat_oligo.forward, encoded_oligo.forward);
assert_eq!(concat_oligo.mirrored, encoded_oligo.mirrored);
}
#[test]
fn test_consecutivepairs() {
let sequence = "UGCGGUGUAAGUGC";
let bpw = BasePairWeights {
AU: 2.0,
GC: 3.0,
GU: 1.0,
};
let encoded = EncodedSequence::with_basepair_weights(sequence, &bpw).unwrap();
assert_eq!(encoded.consecutive_pairs_at_lag(25, 3), (0, 0, 0, 0.0));
assert_eq!(encoded.consecutive_pairs_at_lag(23, 3), (0, 0, 0, 0.0));
assert_eq!(encoded.consecutive_pairs_at_lag(21, 3), (0, 8, 13, 0.0));
assert_eq!(encoded.consecutive_pairs_at_lag(16, 3), (1, 3, 13, 3.0));
assert_eq!(encoded.consecutive_pairs_at_lag(15, 3), (2, 5, 10, 2.0));
assert_eq!(encoded.consecutive_pairs_at_lag(12, 3), (3, 2, 10, 15.0));
assert_eq!(encoded.consecutive_pairs_at_lag(9, 3), (1, 0, 9, 2.0));
assert_eq!(encoded.consecutive_pairs_at_lag(5, 3), (0, 0, 5, 0.0));
assert_eq!(encoded.consecutive_pairs_at_lag(4, 3), (1, 0, 4, 1.0));
assert_eq!(encoded.consecutive_pairs_at_lag(3, 3), (0, 0, 0, 0.0));
assert_eq!(encoded.consecutive_pairs_at_lag(2, 3), (0, 0, 0, 0.0));
assert_eq!(encoded.consecutive_pairs_at_lag(1, 3), (0, 0, 0, 0.0));
assert_eq!(encoded.consecutive_pairs_at_lag(0, 3), (0, 0, 0, 0.0));
// CGGCA ACGUAG GGGUU
//let tobesplit = "CGGCAACGUAGGGGUU";
let tobesplit = "GGGUUACGUAGCGGCA";
let tobesplitenc = EncodedSequence::with_basepair_weights(tobesplit, &bpw).unwrap();
let splitenc = tobesplitenc.subsequence(11, 5);
assert_eq!(splitenc.consecutive_pairs_at_lag(6, 3), (1, 1, 5, 9.0));
assert_eq!(splitenc.consecutive_pairs_at_lag(11, 3), (1, 4, 7, 1.0));
}
}
| {
self.paired().count()
} |
admission_test.go | /*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admit
import (
"testing"
)
func TestAdmission(t *testing.T) | {
handler := NewAlwaysAdmit()
err := handler.Admit(nil)
if err != nil {
t.Errorf("Unexpected error returned from admission handler")
}
} |
|
ext-options.js | __ace_shadowed__.define('ace/ext/options', ['require', 'exports', 'module' ], function(require, exports, module) {
var modesByName = modelist.modesByName;
var options = [
["Document", function(name) {
doclist.loadDoc(name, function(session) {
if (!session)
return;
session = env.split.setSession(session);
updateUIEditorOptions();
env.editor.focus();
});
}, doclist.all],
["Mode", function(value) {
env.editor.session.setMode(modesByName[value].mode || modesByName.text.mode);
env.editor.session.modeName = value;
}, function(value) {
return env.editor.session.modeName || "text"
}, modelist.modes],
["Split", function(value) {
var sp = env.split;
if (value == "none") {
if (sp.getSplits() == 2) {
env.secondSession = sp.getEditor(1).session;
}
sp.setSplits(1);
} else {
var newEditor = (sp.getSplits() == 1);
if (value == "below") {
sp.setOrientation(sp.BELOW);
} else {
sp.setOrientation(sp.BESIDE);
}
sp.setSplits(2);
if (newEditor) {
var session = env.secondSession || sp.getEditor(0).session;
var newSession = sp.setSession(session, 1);
newSession.name = session.name;
}
}
}, ["None", "Beside", "Below"]],
["Theme", function(value) {
if (!value)
return;
env.editor.setTheme("ace/theme/" + value);
themeEl.selectedValue = value;
}, function() {
return env.editor.getTheme();
}, {
"Bright": {
chrome: "Chrome",
clouds: "Clouds",
crimson_editor: "Crimson Editor",
dawn: "Dawn",
dreamweaver: "Dreamweaver",
eclipse: "Eclipse",
github: "GitHub",
solarized_light: "Solarized Light",
textmate: "TextMate",
tomorrow: "Tomorrow",
xcode: "XCode"
},
"Dark": {
ambiance: "Ambiance",
chaos: "Chaos",
clouds_midnight: "Clouds Midnight",
cobalt: "Cobalt",
idle_fingers: "idleFingers",
kr_theme: "krTheme",
merbivore: "Merbivore",
merbivore_soft: "Merbivore Soft",
mono_industrial: "Mono Industrial",
monokai: "Monokai",
pastel_on_dark: "Pastel on dark",
solarized_dark: "Solarized Dark",
twilight: "Twilight",
tomorrow_night: "Tomorrow Night",
tomorrow_night_blue: "Tomorrow Night Blue",
tomorrow_night_bright: "Tomorrow Night Bright",
tomorrow_night_eighties: "Tomorrow Night 80s",
vibrant_ink: "Vibrant Ink",
}
}],
["Code Folding", function(value) {
env.editor.getSession().setFoldStyle(value);
env.editor.setShowFoldWidgets(value !== "manual");
}, ["manual", "mark begin", "mark begin and end"]],
["Soft Wrap", function(value) {
value = value.toLowerCase()
var session = env.editor.getSession();
var renderer = env.editor.renderer;
session.setUseWrapMode(value == "off");
var col = parseInt(value) || null;
renderer.setPrintMarginColumn(col || 80);
session.setWrapLimitRange(col, col);
}, ["Off", "40 Chars", "80 Chars", "Free"]],
["Key Binding", function(value) {
env.editor.setKeyboardHandler(keybindings[value]);
}, ["Ace", "Vim", "Emacs", "Custom"]],
["Font Size", function(value) {
env.split.setFontSize(value + "px");
}, [10, 11, 12, 14, 16, 20, 24]],
["Full Line Selection", function(checked) {
env.editor.setSelectionStyle(checked ? "line" : "text");
}],
["Highlight Active Line", function(checked) {
env.editor.setHighlightActiveLine(checked);
}],
["Show Invisibles", function(checked) {
env.editor.setShowInvisibles(checked);
}],
["Show Gutter", function(checked) {
env.editor.renderer.setShowGutter(checked);
}],
["Show Indent Guides", function(checked) {
env.editor.renderer.setDisplayIndentGuides(checked);
}],
["Show Print Margin", function(checked) {
env.editor.renderer.setShowPrintMargin(checked);
}],
["Persistent HScroll", function(checked) {
env.editor.renderer.setHScrollBarAlwaysVisible(checked);
}],
["Animate Scrolling", function(checked) {
env.editor.setAnimatedScroll(checked);
}],
["Use Soft Tab", function(checked) {
env.editor.getSession().setUseSoftTabs(checked);
}],
["Highlight Selected Word", function(checked) {
env.editor.setHighlightSelectedWord(checked);
}],
["Enable Behaviours", function(checked) {
env.editor.setBehavioursEnabled(checked);
}],
["Fade Fold Widgets", function(checked) {
env.editor.setFadeFoldWidgets(checked);
}],
["Show Token info", function(checked) {
env.editor.setFadeFoldWidgets(checked);
}]
]
var createOptionsPanel = function(options) {
var html = []
var container = document.createElement("div");
container.style.cssText = "position: absolute; overflow: hidden";
var inner = document.createElement("div");
inner.style.cssText = "width: 120%;height:100%;overflow: scroll";
container.appendChild(inner);
html.push("<table><tbody>");
options.forEach(function(o) {
});
html.push(
'<tr>',
'<td>',
'<label for="', s,'"></label>',
'</td><td>',
'<input type="', s,'" name="', s,'" id="',s ,'">',
'</td>',
'</tr>'
)
html.push("</tbody></table>");
return container;
}
function bindCheckbox(id, callback) {
var el = document.getElementById(id);
if (localStorage && localStorage.getItem(id))
el.checked = localStorage.getItem(id) == "1";
var onCheck = function() {
callback(!!el.checked);
saveOption(el);
};
el.onclick = onCheck;
onCheck();
}
function bindDropdown(id, callback) {
var el = document.getElementById(id);
if (localStorage && localStorage.getItem(id))
el.value = localStorage.getItem(id);
var onChange = function() {
callback(el.value);
saveOption(el);
};
el.onchange = onChange;
onChange();
}
function fillOptgroup(list, el) {
list.forEach(function(item) {
var option = document.createElement("option");
option.setAttribute("value", item.name);
option.innerHTML = item.desc;
el.appendChild(option);
});
}
function fillDropdown(list, el) {
if (Array.isArray(list)) {
fillOptgroup(list, el);
return;
} | var group = document.createElement("optgroup");
group.setAttribute("label", i);
fillOptgroup(list[i], group);
el.appendChild(group);
}
}
function createOptionControl(opt) {
if (opt.values) {
var el = dom.createElement("select");
el.setAttribute("size", opt.visibleSize || 1);
fillDropdown(opt.values, el)
} else {
var el = dom.createElement("checkbox");
}
el.setAttribute("name", "opt_" + opt.name)
return el;
}
function createOptionCell(opt) {
if (opt.values) {
var el = dom.createElement("select");
el.setAttribute("size", opt.visibleSize || 1);
fillDropdown(opt.values, el)
} else {
var el = dom.createElement("checkbox");
}
el.setAttribute("name", "opt_" + opt.name)
return el;
}
createOptionsPanel(options)
}); | for(var i in list) { |
app.py | import getopt
import web
import sys
#from web.wsgiserver import CherryPyWSGIServer
#from cherrypy import wsgiserver
from cheroot import wsgi # This replaces the 2 above
from flask import Flask, request, request_started
from functools import wraps
from models import User, Account
from database import db_session
import simplejson as json
makejson = json.dumps
app = Flask(__name__)
makejson = json.dumps
DEFAULT_PORT_NO = 8888
def usageguide():
print "InsecureBankv2 Backend-Server"
print "Options: "
print " --port p serve on port p (default 8888)"
print " --help print this message"
@app.errorhandler(500)
def internal_servererror(error):
|
'''
The function handles the authentication mechanism
'''
@app.route('/login', methods=['POST'])
def login():
Responsemsg="fail"
user = request.form['username']
#checks for presence of user in the database #requires models.py
u = User.query.filter(User.username == request.form["username"]).first()
print "u=",u
if u and u.password == request.form["password"]:
Responsemsg="Correct Credentials"
elif u and u.password != request.form["password"]:
Responsemsg="Wrong Password"
elif not u:
Responsemsg="User Does not Exist"
else: Responsemsg="Some Error"
data = {"message" : Responsemsg, "user": user}
print makejson(data)
return makejson(data)
'''
The function responds back with the from and to debit accounts corresponding to logged in user
'''
@app.route('/getaccounts', methods=['POST'])
def getaccounts():
#set accounts from the request
Responsemsg="fail"
acc1=acc2=from_acc=to_acc=0
user=request.form['username']
#checks for presence of user in the database
u = User.query.filter(User.username == user).first()
if not u or u.password != request.form["password"]:
Responsemsg="Wrong Credentials so trx fail"
else:
Responsemsg="Correct Credentials so get accounts will continue"
a=Account.query.filter(Account.user == user)
for i in a:
if (i.type=='from'):
from_acc=i.account_number;
for j in a:
if (i.type=='to'):
to_acc=i.account_number;
data = {"message" : Responsemsg, "from": from_acc,"to": to_acc}
print makejson(data)
return makejson(data)
'''
The function takes a new password as input and passes it on to the change password module
'''
@app.route('/changepassword', methods=['POST'])
def changepassword():
    # Set the POSTed 'newpassword' for the POSTed 'username' and report the
    # outcome as JSON.
    #
    # NOTE(review): no current password (or any credential) is required, so
    # anyone can change any user's password, and the new password is echoed
    # to stdout. Presumably intentional for this insecure training app --
    # verify before reuse.
    #set accounts from the request
    Responsemsg="fail"
    newpassword=request.form['newpassword']
    user=request.form['username']
    print newpassword
    u = User.query.filter(User.username == user).first() #checks for presence of user in the database
    if not u:
        Responsemsg="Error"
    else:
        Responsemsg="Change Password Successful"
        u.password = newpassword
        db_session.commit()
    data = {"message" : Responsemsg}
    print makejson(data)
    return makejson(data)
'''
The function handles the transaction module
'''
@app.route('/dotransfer', methods=['POST'])
def dotransfer():
    # Move `amount` from the POSTed from_acc to to_acc after re-checking
    # the user's credentials; responds with a JSON transfer summary.
    #
    # NOTE(review): the amount is not validated (negative/oversized values
    # pass) and ownership of from_acc is never checked -- presumably
    # intentional in this deliberately insecure app; confirm before reuse.
    Responsemsg="fail"
    # BUG FIX: from_acc/to_acc must exist on the failure path too --
    # building `data` below otherwise raises NameError (HTTP 500) whenever
    # the credentials are wrong.
    from_acc=to_acc=0
    user=request.form['username']
    amount=request.form['amount']
    u = User.query.filter(User.username == user).first() #checks for presence of user in the database
    if not u or u.password != request.form["password"]:
        Responsemsg="Wrong Credentials so trx fail"
    else:
        Responsemsg="Success"
        from_acc = request.form["from_acc"]
        to_acc = request.form["to_acc"]
        amount = request.form["amount"]
        from_account = Account.query.filter(Account.account_number == from_acc).first()
        to_account = Account.query.filter(Account.account_number == to_acc).first()
        to_account.balance += int(request.form['amount'])
        from_account.balance -= int(request.form['amount'])
        db_session.commit()
    data = {"message" : Responsemsg, "from": from_acc, "to": to_acc, "amount": amount}
    #print makejson(data)
    return makejson(data)
'''
The function provides login mechanism to a developer user during development phase
'''
@app.route('/devlogin', methods=['POST'])
def devlogin():
    # Development-only login: accepts ANY credentials and always reports
    # success. NOTE(review): this is an authentication backdoor --
    # presumably intentional in this insecure training app, but it must
    # never ship in a real deployment.
    user=request.form['username']
    Responsemsg="Correct Credentials"
    data = {"message" : Responsemsg, "user": user}
    print makejson(data)
    return makejson(data)
if __name__ == '__main__':
    # Entry point: parse --help/--port options, then serve the Flask `app`
    # on all interfaces using cheroot's WSGI server.
    port = DEFAULT_PORT_NO
    options, args = getopt.getopt(sys.argv[1:], "", ["help", "port="])
    for op, arg1 in options:
        if op == "--help":
            usageguide()
            sys.exit(2)
        elif op == "--port":
            port = int(arg1)
    # NOTE(review): `urls`/`apps` build a web.py application that is never
    # served (the wsgi.Server below wraps the Flask `app`). Looks like dead
    # code from an earlier web.py setup -- verify before removing.
    urls = ("/.*", "app")
    apps = web.application(urls, globals())
    server = wsgi.Server(("0.0.0.0", port),app,server_name='localhost')
    print "The server is hosted on port:",(port)
    try:
        server.start()
        #apps.run(port)
    except KeyboardInterrupt:
        server.stop()
| print " [!]", error
return "Internal Server Error", 500 |
deploy_advanced.py | #!/usr/bin/python3
from brownie import AdvancedCollectible, accounts, network, config
from scripts.helpful_scripts import fund_advanced_collectible
def main():
    """Deploy AdvancedCollectible wired to the active network's VRF
    coordinator, LINK token and keyhash, fund it with LINK, and return it.
    """
    # NOTE(review): printing the deployer's private key to stdout leaks a
    # secret into logs -- confirm this is debug-only before keeping it.
    print(config["wallets"]["from_key"])
    dev = accounts.add(config["wallets"]["from_key"])
    print(network.show_active())
    # publish_source = True if os.getenv("ETHERSCAN_TOKEN") else False # Currently having an issue with this
    publish_source = False
    # Look up the active network once instead of once per config key.
    active = network.show_active()
    advanced_collectible = AdvancedCollectible.deploy(
        config["networks"][active]["vrf_coordinator"],
        config["networks"][active]["link_token"],
        config["networks"][active]["keyhash"],
        {"from": dev},
        publish_source=publish_source,
    )
    # Fund with LINK so the collectible can pay for VRF randomness requests.
    fund_advanced_collectible(advanced_collectible)
    return advanced_collectible
|
a2c.py | import tflearn
import math
import numpy as np
import tensorflow as tf
import os
import time
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
FEATURE_NUM = 128
EPS = 1e-4
GAMMA = 0.99
class Network():
    def CreateNetwork(self, inputs):
        # Build the shared actor-critic trunk. Rows 0, 1 and 5 of the state
        # feed only their most recent scalar into dense layers; rows 2-4
        # feed their full history through 1-D convolutions (row 4 truncated
        # to a_dim columns). Assumes `inputs` has shape
        # [batch, s_dim[0], s_dim[1]] -- TODO confirm the row layout.
        # Returns (pi, value): a softmax policy over a_dim actions and a
        # scalar state-value estimate sharing the same trunk.
        with tf.variable_scope('actor'):
            split_0 = tflearn.fully_connected(
                inputs[:, 0:1, -1], FEATURE_NUM, activation='relu')
            split_1 = tflearn.fully_connected(
                inputs[:, 1:2, -1], FEATURE_NUM, activation='relu')
            split_2 = tflearn.conv_1d(
                inputs[:, 2:3, :], FEATURE_NUM, 4, activation='relu')
            split_3 = tflearn.conv_1d(
                inputs[:, 3:4, :], FEATURE_NUM, 4, activation='relu')
            split_4 = tflearn.conv_1d(
                inputs[:, 4:5, :self.a_dim], FEATURE_NUM, 4, activation='relu')
            split_5 = tflearn.fully_connected(
                inputs[:, 5:6, -1], FEATURE_NUM, activation='relu')
            split_2_flat = tflearn.flatten(split_2)
            split_3_flat = tflearn.flatten(split_3)
            split_4_flat = tflearn.flatten(split_4)
            merge_net = tflearn.merge(
                [split_0, split_1, split_2_flat, split_3_flat, split_4_flat, split_5], 'concat')
            net = tflearn.fully_connected(
                merge_net, FEATURE_NUM, activation='relu')
            pi = tflearn.fully_connected(net, self.a_dim, activation='softmax')
            value = tflearn.fully_connected(net, 1, activation='linear')
            return pi, value
    def get_network_params(self):
        # Snapshot the current numeric values of all trainable actor variables.
        return self.sess.run(self.network_params)
    def set_network_params(self, input_network_params):
        # Load a list of parameter arrays (as produced by get_network_params)
        # back into the network through the pre-built assign ops.
        self.sess.run(self.set_network_params_op, feed_dict={
            i: d for i, d in zip(self.input_network_params, input_network_params)
        })
def __init__(self, sess, state_dim, action_dim, learning_rate):
self.quality = 0
self.s_dim = state_dim
self.a_dim = action_dim
self.lr_rate = learning_rate
self.sess = sess
self.outputs = tf.placeholder(tf.float32, [None, 1])
self.inputs = tf.placeholder(
tf.float32, [None, self.s_dim[0], self.s_dim[1]])
self.acts = tf.placeholder(tf.float32, [None, self.a_dim])
self.entropy_weight = tf.placeholder(tf.float32)
self.pi, self.val = self.CreateNetwork(inputs=self.inputs)
self.real_out = tf.clip_by_value(self.pi, EPS, 1. - EPS)
self.log_prob = tf.log(tf.reduce_sum(tf.multiply(
self.real_out, self.acts), reduction_indices=1, keepdims=True))
self.entropy = tf.multiply(self.real_out, tf.log(self.real_out))
# Get all network parameters
self.network_params = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')
# Set all network parameters
self.input_network_params = []
for param in self.network_params:
self.input_network_params.append(
tf.placeholder(tf.float32, shape=param.get_shape()))
self.set_network_params_op = [] | self.network_params[idx].assign(param))
self.loss = tflearn.mean_square(self.val, self.outputs) \
- tf.reduce_mean(self.log_prob * tf.stop_gradient(self.outputs - self.val)) \
+ self.entropy_weight * tf.reduce_mean(self.entropy)
self.optimize = tf.train.AdamOptimizer(
self.lr_rate).minimize(self.loss)
def predict(self, input):
action = self.sess.run(self.real_out, feed_dict={
self.inputs: input
})
return action[0]
def get_entropy(self, step):
if step < 20000:
return 5.
elif step < 50000:
return 3.
elif step < 70000:
return 1.
elif step < 90000:
return 0.5
elif step < 120000:
return 0.3
else:
return 0.1
    def train(self, s_batch, a_batch, v_batch, epoch):
        # One A2C optimizer step: feed states, one-hot actions and target
        # values; the entropy weight is annealed via get_entropy(epoch).
        # print s_batch.shape, a_batch.shape, v_batch.shape
        # s_batch, a_batch, v_batch = tflearn.data_utils.shuffle(
        #     s_batch, a_batch, v_batch)
        self.sess.run(self.optimize, feed_dict={
            self.inputs: s_batch,
            self.acts: a_batch,
            self.outputs: v_batch,
            self.entropy_weight: self.get_entropy(epoch)
        })
def compute_v(self, s_batch, a_batch, r_batch, terminal):
ba_size = len(s_batch)
R_batch = np.zeros([len(r_batch), 1])
if terminal:
R_batch[-1, 0] = 0 # terminal state
else:
v_batch = self.sess.run(self.val, feed_dict={
self.inputs: s_batch
})
R_batch[-1, 0] = v_batch[-1, 0] # boot strap from last state
for t in reversed(range(ba_size - 1)):
R_batch[t, 0] = r_batch[t] + GAMMA * R_batch[t + 1, 0]
return list(R_batch) | for idx, param in enumerate(self.input_network_params):
self.set_network_params_op.append( |
stack.go | package debug
import (
"runtime"
"strings"
)
// GoroutineID returns the goroutine id of the calling goroutine.
// It parses the header line produced by runtime.Stack, which has the form
// "goroutine <id> [<state>]:"; 64 bytes is enough to capture that header
// (the previous comment's claim of capturing the entire trace was wrong).
func GoroutineID() string {
	var buf [64]byte
	n := runtime.Stack(buf[:], false)
	return strings.Fields(strings.TrimPrefix(string(buf[:n]), "goroutine "))[0]
}
|
conjugate_gradient_optimizer.py | import numpy as np
import theano
import theano.tensor as TT
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.misc import krylov
from rllab.misc import logger
from rllab.misc.ext import sliced_fun
class PerlmutterHvp(Serializable):
    """Exact Hessian-vector products via Pearlmutter's trick: Hx is the
    gradient of the inner product of the constraint gradient with x."""
    def __init__(self, num_slices=1):
        Serializable.quick_init(self, locals())
        self.target = None
        self.reg_coeff = None
        self.opt_fun = None
        self._num_slices = num_slices
    def update_opt(self, f, target, inputs, reg_coeff):
        # Compile f_Hx_plain(*inputs, *xs) = H(f) @ xs, where H is the
        # Hessian of f w.r.t. target's trainable parameters.
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = theano.grad(
            f, wrt=params, disconnected_inputs='warn')
        xs = tuple([ext.new_tensor_like("%s x" % p.name, p) for p in params])
        def Hx_plain():
            # grad(g . x) w.r.t. params == H x (Pearlmutter), flattened.
            Hx_plain_splits = TT.grad(
                TT.sum([TT.sum(g * x)
                        for g, x in zip(constraint_grads, xs)]),
                wrt=params,
                disconnected_inputs='warn'
            )
            return TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])
        self.opt_fun = ext.lazydict(
            f_Hx_plain=lambda: ext.compile_function(
                inputs=inputs + xs,
                outputs=Hx_plain(),
                log_name="f_Hx_plain",
            ),
        )
    def build_eval(self, inputs):
        # Returns eval(x) = H x + reg_coeff * x over the numeric inputs.
        def eval(x):
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
                inputs, xs) + self.reg_coeff * x
            return ret
        return eval
class FiniteDifferenceHvp(Serializable):
    """Hessian-vector products approximated by finite differences of the
    compiled constraint gradient.

    NOTE(review): `grad_clip` is stored but never applied anywhere in this
    class -- confirm whether clipping was intended.
    """
    def __init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1):
        Serializable.quick_init(self, locals())
        self.base_eps = base_eps
        self.symmetric = symmetric
        self.grad_clip = grad_clip
        self._num_slices = num_slices
    def update_opt(self, f, target, inputs, reg_coeff):
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = theano.grad(
            f, wrt=params, disconnected_inputs='warn')
        flat_grad = ext.flatten_tensor_variables(constraint_grads)
        def f_Hx_plain(*args):
            # Numeric Hx: perturb the parameters by +/- eps*x and difference
            # the compiled gradient; eps is scaled by the parameter norm.
            inputs_ = args[:len(inputs)]
            xs = args[len(inputs):]
            flat_xs = np.concatenate([np.reshape(x, (-1,)) for x in xs])
            param_val = self.target.get_param_values(trainable=True)
            eps = np.cast['float32'](
                self.base_eps / (np.linalg.norm(param_val) + 1e-8))
            self.target.set_param_values(
                param_val + eps * flat_xs, trainable=True)
            flat_grad_dvplus = self.opt_fun["f_grad"](*inputs_)
            if self.symmetric:
                # Central difference: (g(p+eps x) - g(p-eps x)) / (2 eps).
                self.target.set_param_values(
                    param_val - eps * flat_xs, trainable=True)
                flat_grad_dvminus = self.opt_fun["f_grad"](*inputs_)
                hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
                self.target.set_param_values(param_val, trainable=True)
            else:
                # Forward difference around the unperturbed parameters.
                self.target.set_param_values(param_val, trainable=True)
                flat_grad = self.opt_fun["f_grad"](*inputs_)
                hx = (flat_grad_dvplus - flat_grad) / eps
            return hx
        self.opt_fun = ext.lazydict(
            f_grad=lambda: ext.compile_function(
                inputs=inputs,
                outputs=flat_grad,
                log_name="f_grad",
            ),
            f_Hx_plain=lambda: f_Hx_plain,
        )
    def build_eval(self, inputs):
        # eval(x) = Hx + reg_coeff * x over the provided numeric inputs.
        def eval(x):
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
                inputs, xs) + self.reg_coeff * x
            return ret
        return eval
class ConjugateGradientOptimizer(Serializable):
| """
Performs constrained optimization via line search. The search direction is computed using a conjugate gradient
algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient
of the loss function.
"""
    def __init__(
            self,
            cg_iters=10,
            reg_coeff=1e-5,
            subsample_factor=1.,
            backtrack_ratio=0.8,
            max_backtracks=15,
            accept_violation=False,
            hvp_approach=None,
            num_slices=1):
        """
        :param cg_iters: The number of CG iterations used to calculate A^-1 g
        :param reg_coeff: A small value so that A -> A + reg*I
        :param subsample_factor: Subsampling factor to reduce samples when using "conjugate gradient. Since the
        computation time for the descent direction dominates, this can greatly reduce the overall computation time.
        :param backtrack_ratio: Multiplicative factor by which the step is shrunk per backtracking iteration
        :param max_backtracks: Maximum number of backtracking (line search) iterations
        :param accept_violation: whether to accept the descent step if it violates the line search condition after
        exhausting all backtracking budgets
        :param hvp_approach: Hessian-vector product strategy; defaults to PerlmutterHvp
        :param num_slices: Number of slices used by sliced_fun when evaluating compiled functions
        :return:
        """
        Serializable.quick_init(self, locals())
        self._cg_iters = cg_iters
        self._reg_coeff = reg_coeff
        self._subsample_factor = subsample_factor
        self._backtrack_ratio = backtrack_ratio
        self._max_backtracks = max_backtracks
        self._num_slices = num_slices
        self._opt_fun = None
        self._target = None
        self._max_constraint_val = None
        self._constraint_name = None
        self._accept_violation = accept_violation
        if hvp_approach is None:
            hvp_approach = PerlmutterHvp(num_slices)
        self._hvp_approach = hvp_approach
    def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name="constraint", *args,
                   **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. It is assumed
        that the first dimension of these inputs should correspond to the number of data points
        :param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled
        :param constraint_name: Name used for logging/compiling the constraint function
        :return: No return value.
        """
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        else:
            extra_inputs = tuple(extra_inputs)
        constraint_term, constraint_value = leq_constraint
        params = target.get_params(trainable=True)
        grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')
        flat_grad = ext.flatten_tensor_variables(grads)
        # The HVP approach owns the curvature (constraint Hessian) machinery.
        self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=inputs + extra_inputs,
                                      reg_coeff=self._reg_coeff)
        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name
        # Functions below are compiled lazily on first use.
        self._opt_fun = ext.lazydict(
            f_loss=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=loss,
                log_name="f_loss",
            ),
            f_grad=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=flat_grad,
                log_name="f_grad",
            ),
            f_constraint=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=constraint_term,
                log_name="constraint",
            ),
            f_loss_constraint=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=[loss, constraint_term],
                log_name="f_loss_constraint",
            ),
        )
def loss(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return sliced_fun(self._opt_fun["f_loss"], self._num_slices)(inputs, extra_inputs)
def constraint_val(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return sliced_fun(self._opt_fun["f_constraint"], self._num_slices)(inputs, extra_inputs)
    def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):
        """Take one constrained step: compute the CG descent direction,
        scale it to the constraint boundary, then backtrack until the loss
        improves and the constraint holds (rolled back on failure unless
        accept_violation was set)."""
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        # Optionally subsample the inputs used for Hessian-vector products;
        # grouped inputs share one index draw per group.
        if self._subsample_factor < 1:
            if subsample_grouped_inputs is None:
                subsample_grouped_inputs = [inputs]
            subsample_inputs = tuple()
            for inputs_grouped in subsample_grouped_inputs:
                n_samples = len(inputs_grouped[0])
                inds = np.random.choice(
                    n_samples, int(n_samples * self._subsample_factor), replace=False)
                subsample_inputs += tuple([x[inds] for x in inputs_grouped])
        else:
            subsample_inputs = inputs
        logger.log("computing loss before")
        loss_before = sliced_fun(self._opt_fun["f_loss"], self._num_slices)(
            inputs, extra_inputs)
        logger.log("performing update")
        logger.log("computing descent direction")
        flat_g = sliced_fun(self._opt_fun["f_grad"], self._num_slices)(
            inputs, extra_inputs)
        Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
        # Approximately solve H d = g with conjugate gradient.
        descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)
        # Largest step keeping the quadratic constraint within budget.
        initial_step_size = np.sqrt(
            2.0 * self._max_constraint_val *
            (1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8))
        )
        if np.isnan(initial_step_size):
            initial_step_size = 1.
        flat_descent_step = initial_step_size * descent_direction
        logger.log("descent direction computed")
        prev_param = np.copy(self._target.get_param_values(trainable=True))
        n_iter = 0
        # Backtracking line search: geometrically shrink the step until the
        # loss improves and the constraint is satisfied.
        for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
            cur_step = ratio * flat_descent_step
            cur_param = prev_param - cur_step
            self._target.set_param_values(cur_param, trainable=True)
            loss, constraint_val = sliced_fun(
                self._opt_fun["f_loss_constraint"], self._num_slices)(inputs, extra_inputs)
            if loss < loss_before and constraint_val <= self._max_constraint_val:
                break
        # Roll back to the previous parameters if the search failed.
        if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or constraint_val >=
                self._max_constraint_val) and not self._accept_violation:
            logger.log("Line search condition violated. Rejecting the step!")
            if np.isnan(loss):
                logger.log("Violated because loss is NaN")
            if np.isnan(constraint_val):
                logger.log("Violated because constraint %s is NaN" %
                           self._constraint_name)
            if loss >= loss_before:
                logger.log("Violated because loss not improving")
            if constraint_val >= self._max_constraint_val:
                logger.log(
                    "Violated because constraint %s is violated" % self._constraint_name)
            self._target.set_param_values(prev_param, trainable=True)
        logger.log("backtrack iters: %d" % n_iter)
        logger.log("computing loss after")
        logger.log("optimization finished")
|
setan.js | module.exports = {
name : 'setan',
description : '',
execute(message, args){
const {Client, MessageAttachment} = require('discord.js');
let attachment = new MessageAttachment
('./db/IMG_20200623_195512.jpg');
message.channel.send(attachment)
} | } |
|
rendering.rs | use crate::{
constants::*,
game_logic::{board::*, game::*, piece::*, ranges::*},
rendering::{animation::*, ui::Button},
states::core_game_state::CoreGameSubstate,
};
use egui_macroquad::egui::TextBuffer;
use futures::sink::drain;
use instant::{Duration, Instant};
use macroquad::prelude::*;
use macroquad_canvas::Canvas2D;
use std::{
cell::Cell,
collections::{HashMap, VecDeque},
rc::Rc,
};
use macroquad::ui::Drag::No;
/// Per-frame rendering resources and UI state shared by the render code.
#[derive(Debug, Clone)]
pub struct CustomRenderContext {
    /// Sprite atlas for the regular piece sprites.
    pieces_texture: Texture2D,
    /// Sprite atlas for special-effect sprites.
    special_texture: Texture2D,
    /// Current substate of the core game loop; drives highlight rendering.
    pub game_state: CoreGameSubstate,
    /// "End Turn" button.
    pub button_next: Button,
    /// "Undo" button.
    pub button_undo: Button,
    /// Construction time; see `elapsed_time`.
    start_time: Instant,
}
impl CustomRenderContext {
    /// Creates the context with both sprite atlases decoded from bytes that
    /// are embedded in the binary at compile time (no file I/O at startup).
    pub(crate) fn new() -> Self {
        CustomRenderContext {
            pieces_texture: Texture2D::from_file_with_format(
                include_bytes!("../../resources/sprites/pieces.png"),
                None,
            ),
            special_texture: Texture2D::from_file_with_format(
                include_bytes!("../../resources/sprites/special.png"),
                None,
            ),
            game_state: CoreGameSubstate::Place,
            button_next: Button::new(10., "End Turn".to_string()),
            button_undo: Button::new(120., "Undo".to_string()),
            start_time: Instant::now(),
        }
    }
    /// Time elapsed since this context was created.
    pub fn elapsed_time(&self) -> Duration {
        self.start_time.elapsed()
    }
/* pub fn reset_elapsed_time(&mut self) {
self.start_time = Instant::now();
}*/ | pub(crate) special_sprites: HashMap<u32, SpriteRender>,
pub(crate) unused_pieces: Vec<Vec<SpriteRender>>,
pub(crate) placed_pieces: HashMap<Point2, SpriteRender>,
pub(crate) effects: HashMap<Point2, Vec<EffectRender>>,
pub(crate) team_colors: Vec<Color>,
next_animations: VecDeque<Vec<Animation>>,
current_animations: Vec<Animation>,
}
impl BoardRender {
    /// Builds the initial render state for `game`: one sprite per placed
    /// piece, empty reserve stacks, and the per-team color palette.
    pub fn new(game: &Game) -> Self {
        let mut unused_pieces = vec![vec![], vec![]];
        let mut placed_pieces = HashMap::new();
        let board = &game.board;
        board.for_each_placed_piece(|point, piece| {
            placed_pieces.insert(
                point,
                SpriteRender::for_piece(
                    &point,
                    piece.piece_kind,
                    game.get_team(piece.team_id).color,
                ),
            );
        });
        BoardRender {
            unused_pieces,
            placed_pieces,
            team_colors: game.teams.iter().map(|t| t.color).collect(),
            special_sprites: HashMap::new(),
            next_animations: VecDeque::new(),
            effects: HashMap::new(),
            current_animations: vec![],
        }
    }
    /// Adds a reserve (not yet placed) piece sprite for `team_id`, stacking
    /// it outside the board: team 0 stacks upwards from the bottom-right,
    /// team 1 downwards from the top-left.
    pub fn add_unused_piece(&mut self, team_id: usize) {
        let unused_pieces = &mut self.unused_pieces[team_id];
        let (x_pos, y_pos) = if team_id == 0 {
            let (upb_x, mut upb_y) = cell_coords_tuple(BOARD_WIDTH, BOARD_HEIGHT - 1);
            upb_y += CELL_ABSOLUTE_WIDTH / 4.;
            (upb_x, upb_y - unused_pieces.len() as f32 * 32.)
        } else {
            let (mut upb_x, upb_y) = cell_coords_tuple(0, 0);
            upb_x -= CELL_ABSOLUTE_WIDTH / 1.25;
            (upb_x, upb_y + unused_pieces.len() as f32 * 32.)
        };
        unused_pieces.push(SpriteRender::new(
            x_pos,
            y_pos,
            PIECE_SCALE,
            self.team_colors[team_id],
            SpriteKind::Piece,
            SpriteRender::piece_sprite_rect(PieceKind::Simple),
        ));
    }
pub fn add_animation_sequence(&mut self, mut animations: Vec<Animation>) {
self.next_animations.push_back(animations);
}
    /// Inserts (or replaces) the sprite for a piece placed at `point`;
    /// exhausted pieces get a greyed-out override tint.
    pub fn add_placed_piece(&mut self, point: &Point2, piece_kind: PieceKind, team_id: usize, exhausted: bool) {
        let mut piece_render = SpriteRender::for_piece(point, piece_kind, self.team_colors[team_id]);
        if exhausted {
            piece_render.override_color = Some(SpriteRender::greyed_out(&piece_render.color));
        }
        self.placed_pieces.insert(
            *point,
            piece_render,
        );
    }
    /// Advances animation state: animations that just finished contribute
    /// their chained `next_animations`, finished ones are dropped, and once
    /// the current batch is empty the next queued batch is started.
    pub fn update(&mut self) {
        let mut new_animations = vec![];
        self.current_animations
            .iter_mut()
            .filter(|a| a.finished_at <= Instant::now())
            .for_each(|a| {
                new_animations.append(&mut a.next_animations);
            });
        let anim_count = self.current_animations.len();
        self.current_animations
            .retain(|a| a.finished_at > Instant::now());
        if anim_count != self.current_animations.len() || !new_animations.is_empty() {
            // info!("animation count {} -> {}; {} new", anim_count, self.animations.len(), new_animations.len());
        }
        // Newly spawned chained animations must be started before they run.
        for animation in new_animations.iter_mut() {
            animation.start(self);
        }
        self.current_animations.append(&mut new_animations);
        if self.current_animations.is_empty() {
            if let Some(mut animations) = self.next_animations.pop_front() {
                for animation in animations.iter_mut() {
                    animation.start(self);
                }
                self.current_animations = animations;
            }
        }
    }
    /// Draws one full frame in back-to-front order: board cells, active
    /// cell effects, range highlights, reserve pieces, placed pieces,
    /// special sprites, and finally the UI buttons.
    pub fn render(&self, board: &Board, render_context: &CustomRenderContext, canvas: &Canvas2D) {
        Self::render_cells(board, canvas);
        for (point,effects) in &self.effects {
            effects.iter().for_each(|e| e.render(point));
        }
        Self::render_highlights(board, render_context, canvas);
        //println!("rendered {:?}", self.unused_pieces.len());
        self.unused_pieces
            .iter()
            .flat_map(|p| p.iter())
            .for_each(|p| {
                p.render(render_context);
            });
        self.placed_pieces
            .iter()
            .for_each(|(_, p)| p.render(render_context));
        self.special_sprites
            .values()
            .for_each(|s| s.render(render_context));
        render_context.button_next.render(canvas);
        render_context.button_undo.render(canvas);
    }
    /// Highlights reachable cells: a light green overlay for the piece under
    /// the mouse cursor, plus a stronger green overlay for the currently
    /// selected piece (when the game substate carries a selection).
    fn render_highlights(board: &Board, render_context: &CustomRenderContext, canvas: &Canvas2D) {
        let mut selected_point_option = Option::None;
        let hovered_point = cell_hovered(canvas);
        if let Some(hovered_piece) = board.get_piece_at(&hovered_point) {
            // Which range semantics apply depends on the game substate.
            let range_context = match render_context.game_state {
                CoreGameSubstate::Place => RangeContext::Moving(*hovered_piece),
                CoreGameSubstate::Move(selected_point) => {
                    selected_point_option = Option::Some(selected_point);
                    RangeContext::Moving(*hovered_piece)
                }
                CoreGameSubstate::Activate(selected_point) => {
                    selected_point_option = Option::Some(selected_point);
                    RangeContext::Special(*hovered_piece)
                }
                CoreGameSubstate::Won(_) => RangeContext::Moving(*hovered_piece),
                CoreGameSubstate::Wait => RangeContext::Moving(*hovered_piece),
            };
            if let Some(m) = hovered_piece.movement.as_ref() {
                let range = m.range;
                Self::highlight_range(
                    board,
                    &hovered_point,
                    &range_context,
                    &range,
                    Color::from_rgba(90, 220, 90, 100),
                )
            }
        }
        if let Some(selected_point) = selected_point_option {
            if let Some(selected_piece) = board.get_piece_at(&selected_point) {
                let range_context = match render_context.game_state {
                    CoreGameSubstate::Place => RangeContext::Moving(*selected_piece),
                    CoreGameSubstate::Move(_) => RangeContext::Moving(*selected_piece),
                    CoreGameSubstate::Activate(_) => RangeContext::Special(*selected_piece),
                    CoreGameSubstate::Won(_) => RangeContext::Moving(*selected_piece),
                    CoreGameSubstate::Wait => RangeContext::Moving(*selected_piece),
                };
                // Moving pieces highlight their movement range; activating
                // pieces highlight their special/activatable range.
                let range_option: Option<Range> = match render_context.game_state {
                    CoreGameSubstate::Place => Option::None,
                    CoreGameSubstate::Move(_) => selected_piece.movement.map(|m| m.range),
                    CoreGameSubstate::Activate(_) => selected_piece.activatable.map(|m| m.range),
                    CoreGameSubstate::Won(_) => selected_piece.movement.map(|m| m.range),
                    CoreGameSubstate::Wait => Option::None,
                };
                if let Some(range) = range_option {
                    Self::highlight_range(
                        board,
                        &selected_point,
                        &range_context,
                        &range,
                        Color::from_rgba(0, 150, 0, 150),
                    )
                }
            }
        }
    }
    /// Draws the checkerboard background: alternating cell colors, a blue
    /// fill for the hovered cell, and a thin black outline on every cell.
    fn render_cells(board: &Board, canvas: &Canvas2D) {
        board.for_each_cell(|cell| {
            let (x_pos, y_pos) = cell_coords(&cell.point);
            let mouse_point = cell_hovered(canvas);
            let color = if cell.point == mouse_point {
                BLUE
            } else if (cell.point.x + cell.point.y + 1) % 2 == 0 {
                Color::from_rgba(187, 173, 160, 255)
            } else {
                Color::from_rgba(238, 228, 218, 255)
            };
            draw_rectangle(
                x_pos,
                y_pos,
                CELL_ABSOLUTE_WIDTH,
                CELL_ABSOLUTE_WIDTH,
                color,
            );
            draw_rectangle_lines(
                x_pos,
                y_pos,
                CELL_ABSOLUTE_WIDTH,
                CELL_ABSOLUTE_WIDTH,
                1.,
                BLACK,
            );
        });
    }
    /// Overlays `color` on every cell reachable from `source_point` for the
    /// given range/context; occupied cells get a red-boosted tint.
    fn highlight_range(
        board: &Board,
        source_point: &Point2,
        range_context: &RangeContext,
        range: &Range,
        color: Color,
    ) {
        for point in range
            .reachable_points(source_point, board, range_context)
            .iter()
        {
            let (x_pos, y_pos) = cell_coords(&point);
            let mut used_color = color;
            if let Some(_piece) = board.get_piece_at(point) {
                used_color = Color {
                    r: 1.,
                    ..used_color
                }
            }
            draw_rectangle(
                x_pos,
                y_pos,
                CELL_ABSOLUTE_WIDTH,
                CELL_ABSOLUTE_WIDTH,
                used_color,
            );
        }
    }
}
/// Cell overlay whose color is interpolated from `from_color` to
/// `towards_color` between the two instants.
#[derive(Clone, Copy, Debug)]
pub struct EffectRender {
    pub from_color: Color,
    pub towards_color: Color,
    pub from_instant: Instant,
    pub towards_instant: Instant,
}
/// A textured sprite animating from one `AnimationPoint` to another.
#[derive(Clone, Copy, Debug)]
pub struct SpriteRender {
    /// Animation start pose.
    pub from: AnimationPoint,
    /// Animation end pose.
    pub to: AnimationPoint,
    /// When set, drawn with this color instead of `color` (e.g. exhausted).
    pub override_color: Option<Color>,
    /// Base tint (usually the owning team's color).
    pub color: Color,
    /// Which texture atlas this sprite is drawn from.
    sprite_kind: SpriteKind,
    /// Source rectangle inside the atlas.
    rect_in_sprite: Rect,
}
/// Selects the texture atlas used when drawing a `SpriteRender`.
#[derive(Clone, Copy, Debug)]
pub enum SpriteKind {
    Piece,
    Special,
}
impl EffectRender {
    /// Fade-in effect starting now and lasting ANIMATION_SPEED*3 ms.
    /// NOTE(review): macroquad's `Color::new` normally takes components in
    /// 0.0..=1.0, but 80. and 100. are passed here -- verify these values
    /// render as intended rather than saturating.
    pub fn new() -> Self {
        EffectRender {
            from_color: Color::new(80., 0., 100., 0.0),
            towards_color: Color::new(80., 0., 100., 0.6),
            from_instant: Instant::now(),
            towards_instant: Instant::now() + Duration::from_millis(ANIMATION_SPEED*3)
        }
    }
    /// Draws the effect rectangle over cell `at`, with each color channel
    /// interpolated by the current animation progress.
    pub fn render(&self, at: &Point2) {
        let (x_pos, y_pos) = cell_coords(at);
        let progress = AnimationPoint::calculate_progress(&self.from_instant, &self.towards_instant, &Instant::now());
        draw_rectangle(
            x_pos,
            y_pos,
            CELL_ABSOLUTE_WIDTH,
            CELL_ABSOLUTE_WIDTH,
            Color {
                r: AnimationPoint::interpolate_value(self.from_color.r, self.towards_color.r, progress),
                g: AnimationPoint::interpolate_value(self.from_color.g, self.towards_color.g, progress),
                b: AnimationPoint::interpolate_value(self.from_color.b, self.towards_color.b, progress),
                a: AnimationPoint::interpolate_value(self.from_color.a, self.towards_color.a, progress)
            }
        );
    }
}
impl SpriteRender {
    /// Sprite at an absolute position with no pending movement (from == to).
    pub fn new(
        x_pos: f32,
        y_pos: f32,
        scale: f32,
        color: Color,
        sprite_kind: SpriteKind,
        rect_in_sprite: Rect,
    ) -> Self {
        let pap = AnimationPoint {
            x_pos,
            y_pos,
            sprite_width: scale,
            instant: Instant::now(),
        };
        Self::animated(pap, pap, color, sprite_kind, rect_in_sprite)
    }
    /// Sprite centered inside the board cell at `point`.
    pub(crate) fn new_at_point(
        point: &Point2,
        sprite_width: f32,
        color: Color,
        sprite_kind: SpriteKind,
        rect_in_sprite: Rect,
    ) -> SpriteRender {
        let (x_pos, y_pos) = Self::render_pos(sprite_width, point);
        SpriteRender::new(
            x_pos,
            y_pos,
            sprite_width,
            color,
            sprite_kind,
            rect_in_sprite,
        )
    }
    /// Piece sprite of `piece_kind` at `point`, tinted with the team color.
    pub(crate) fn for_piece(point: &Point2, piece_kind: PieceKind, color: Color) -> SpriteRender {
        SpriteRender::new_at_point(
            point,
            PIECE_SCALE,
            color,
            SpriteKind::Piece,
            Self::piece_sprite_rect(piece_kind),
        )
    }
    /// Top-left draw position that centers a sprite of `sprite_width`
    /// inside the cell at `point`.
    fn render_pos(sprite_width: f32, point: &Point2) -> (f32, f32) {
        let (x_pos, y_pos) = cell_coords(point);
        let shift = (CELL_ABSOLUTE_WIDTH - sprite_width) / 2.;
        (x_pos + shift, y_pos + shift)
    }
    /// Sprite animating from `from` to `to` with the given tint and atlas rect.
    fn animated(
        from: AnimationPoint,
        to: AnimationPoint,
        color: Color,
        sprite_kind: SpriteKind,
        rect_in_sprite: Rect,
    ) -> Self {
        SpriteRender {
            from,
            to,
            override_color: None,
            color,
            sprite_kind,
            rect_in_sprite,
        }
    }
    /// Atlas source rectangle for a piece kind; the pieces sheet is laid
    /// out as a 2-column grid of SPRITE_WIDTH cells.
    fn piece_sprite_rect(piece_kind: PieceKind) -> Rect {
        let (sprite_x, sprite_y) = match piece_kind {
            PieceKind::Simple => (0, 0),
            PieceKind::HorizontalBar => (1, 0),
            PieceKind::VerticalBar => (0, 1),
            PieceKind::Cross => (1, 1),
            PieceKind::Queen => (0, 2),
            PieceKind::Castle => (1, 2),
            PieceKind::Sniper => (0, 3),
        };
        Rect {
            x: sprite_x as f32 * SPRITE_WIDTH,
            y: sprite_y as f32 * SPRITE_WIDTH,
            w: SPRITE_WIDTH,
            h: SPRITE_WIDTH,
        }
    }
    /// Washed-out version of a team color (2/3 white) used for exhausted
    /// pieces. NOTE(review): `Color::new` expects components in 0.0..=1.0,
    /// but alpha is passed as 255. here -- verify this renders as intended.
    pub fn greyed_out(color: &Color) -> Color {
        Color::new(
            (color.r + WHITE.r * 2.) / 3.,
            (color.g + WHITE.g * 2.) / 3.,
            (color.b + WHITE.b * 2.) / 3.,
            255.
        )
    }
    /// Starts a movement animation towards the cell at `point`, beginning
    /// from the current animation target.
    pub fn move_towards(&mut self, point: &Point2, speed_ms: u64) {
        self.from = self.to;
        self.from.instant = Instant::now();
        let (x_pos, y_pos) = Self::render_pos(self.from.sprite_width, point);
        self.to = AnimationPoint {
            x_pos,
            y_pos,
            sprite_width: self.from.sprite_width,
            instant: Instant::now() + Duration::from_millis(speed_ms),
        };
    }
    /// Starts a resize animation towards `sprite_width`, keeping the sprite
    /// centered in its cell.
    pub fn scale(&mut self, sprite_width: f32, speed_ms: u64) {
        self.from.instant = Instant::now();
        self.to = SpriteRender::scale_animation_point(&self.from, sprite_width);
        self.to.instant = Instant::now() + Duration::from_millis(speed_ms);
    }
    /// Recomputes `animation_point`'s position so a sprite of the new
    /// `sprite_width` stays centered within the same cell.
    pub fn scale_animation_point(animation_point: &AnimationPoint, sprite_width: f32) -> AnimationPoint {
        let shift = (CELL_ABSOLUTE_WIDTH - sprite_width) / 2.;
        AnimationPoint {
            x_pos: animation_point.x_pos + animation_point.sprite_width / 2. - CELL_ABSOLUTE_WIDTH / 2. + shift,
            y_pos: animation_point.y_pos + animation_point.sprite_width / 2. - CELL_ABSOLUTE_WIDTH / 2. + shift,
            sprite_width,
            instant: Instant::now(),
        }
    }
    /// Draws the sprite at its interpolated animation pose, using the
    /// override color when set and the atlas selected by `sprite_kind`.
    fn render(&self, render_context: &CustomRenderContext) {
        let animation = self.from.interpolate(&self.to, Instant::now());
        let texture = match self.sprite_kind {
            SpriteKind::Piece => render_context.pieces_texture,
            SpriteKind::Special => render_context.special_texture,
        };
        draw_texture_ex(
            texture,
            animation.x_pos,
            animation.y_pos,
            self.override_color.unwrap_or(self.color),
            DrawTextureParams {
                dest_size: Some(Vec2::new(animation.sprite_width, animation.sprite_width)),
                source: Some(self.rect_in_sprite),
                ..Default::default()
            },
        );
        /*draw_rectangle_lines(
            animation.x_pos,
            animation.y_pos,
            animation.sprite_width,
            animation.sprite_width,
            2.,
            GREEN
        )*/
    }
} | }
pub struct BoardRender { |
pet_test.go | package models
import (
"encoding/json"
"testing"
)
var testJSON = `{"id":"507f191e810c19729de860ea", "userId":"Me", "name":"Fluffy", "type":"Snake", "breed":"Python", "dateOfBirth":"September", "image":"base64"}`
var testJSONBadID = `{"id":"bad", "userId":"Me", "name":"Fluffy", "type":"Snake", "breed":"Python", "dateOfBirth":"September", "image":"base64"}`
// TestValidPet verifies that a well-formed pet JSON document unmarshals
// into a Pet with every field populated as expected.
func TestValidPet(t *testing.T) {
	var pet Pet
	// Fatalf (not Errorf): with a failed unmarshal the field checks below
	// would all fail on the zero value and drown the real error.
	if err := json.Unmarshal([]byte(testJSON), &pet); err != nil {
		t.Fatalf("Unexpected error: %s", err)
	}
	// FIX: the original passed actual as "expected" and expected as
	// "actual" in every Errorf, producing misleading failure messages
	// (and misspelled "Unexpected").
	checks := []struct {
		field, expected, actual string
	}{
		{"userId", "Me", pet.UserID},
		{"name", "Fluffy", pet.Name},
		{"type", "Snake", pet.Type},
		{"breed", "Python", pet.Breed},
		{"dateOfBirth", "September", pet.DateOfBirth},
		{"image", "base64", pet.Image},
	}
	for _, c := range checks {
		if c.actual != c.expected {
			t.Errorf("Incorrect %s, expected: %s, actual: %s", c.field, c.expected, c.actual)
		}
	}
}
func | (t *testing.T) {
var pet Pet
err := json.Unmarshal([]byte(testJSONBadID), &pet)
if err.Error() != `invalid ObjectId in JSON: "bad"` {
t.Errorf("Expected error %s, but was %s", `invalid ObjectId in JSON: "bad"`, err)
}
}
| TestInvalidId |
index.js | export { default } from "./BezierController"; | ||
uart_fifo_wdata.rs | #[doc = "Register `uart_fifo_wdata` writer"]
pub struct W(crate::W<UART_FIFO_WDATA_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<UART_FIFO_WDATA_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl core::convert::From<crate::W<UART_FIFO_WDATA_SPEC>> for W {
fn from(writer: crate::W<UART_FIFO_WDATA_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `uart_fifo_wdata` writer - "]
// Write proxy for the 8-bit data field occupying bits 0..=7 of the register.
pub struct UART_FIFO_WDATA_W<'a> {
    w: &'a mut W,
}
impl<'a> UART_FIFO_WDATA_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Clear the low byte, then splice in the new value; all other bits of
        // the register word are preserved.
        self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff);
        self.w
    }
}
impl W {
    #[doc = "Bits 0:7"]
    #[inline(always)]
    pub fn uart_fifo_wdata(&mut self) -> UART_FIFO_WDATA_W {
        UART_FIFO_WDATA_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    // Unsafe by svd2rust convention: arbitrary bit patterns bypass the typed
    // field accessors and may violate hardware field invariants.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "uart_fifo_wdata.\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [uart_fifo_wdata](index.html) module"]
// Zero-sized marker type identifying this register in the generic Reg API.
pub struct UART_FIFO_WDATA_SPEC;
impl crate::RegisterSpec for UART_FIFO_WDATA_SPEC {
    // The register is accessed as a full 32-bit word.
    type Ux = u32;
}
#[doc = "`reset()` method sets uart_fifo_wdata to value 0"]
impl crate::Resettable for UART_FIFO_WDATA_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
#[doc = "`write(|w| ..)` method takes [uart_fifo_wdata::W](W) writer structure"]
impl crate::Writable for UART_FIFO_WDATA_SPEC {
    type Writer = W;
}
midi_coremidi.go | // +build darwin coremidi
// +build !portmidi
package main
import (
"errors"
"fmt" | )
// midiOut is the CoreMIDI output port created by initMidi.
var midiOut coremidi.OutputPort

// midiDestination is the destination selected by initMidi; sendMidiNote
// writes note-on packets to it.
var midiDestination coremidi.Destination
func sendMidiNote(channel, note, velocity int) {
if note != 0 {
packet := coremidi.NewPacket([]byte{byte(midiNoteOn | channel), byte(note), byte(velocity)})
_ = packet.Send(&midiOut, &midiDestination)
}
}
// initMidi enumerates CoreMIDI destinations, selects one (chosenPort, or the
// first destination when chosenPort == defaultMidiPort), and opens the client
// and output port used by sendMidiNote. Results are stored in the
// package-level midiDestination and midiOut variables.
//
// Fixes: the error returned by coremidi.AllDestinations was silently ignored,
// and a negative non-default chosenPort would panic on the slice index
// instead of being reported.
func initMidi(chosenPort int) (err error) {
	allDests, err := coremidi.AllDestinations()
	if err != nil {
		return err
	}
	if len(allDests) == 0 {
		return errors.New("no midi output found")
	}
	fmt.Println("midi outputs found:")
	for i, d := range allDests {
		fmt.Printf("%d: \"(%s), %s\"", i, d.Manufacturer(), d.Name())
		if i == 0 {
			fmt.Print(" ", labelPortDefault)
		}
		if i == chosenPort {
			fmt.Print(" ", labelPortSelected)
		}
		fmt.Println()
	}
	if chosenPort != defaultMidiPort {
		// Reject out-of-range selections (including negative values other
		// than the default sentinel) instead of panicking on a bad index.
		if chosenPort < 0 || chosenPort >= len(allDests) {
			logger.Fatalf("selected midi port does not exist: %d\n", chosenPort)
		}
		midiDestination = allDests[chosenPort]
	} else {
		midiDestination = allDests[0]
	}
	client, err := coremidi.NewClient("droguedrums-client")
	if err != nil {
		fmt.Println(err)
		return
	}
	midiOut, err = coremidi.NewOutputPort(client, "droguedrums-port")
	if err != nil {
		fmt.Println(err)
		return
	}
	return
}
// closeMidi is a deliberate no-op on the CoreMIDI backend — presumably kept
// so every midi backend exposes the same init/send/close API; confirm against
// the portmidi build before removing.
func closeMidi() {
}
setup.py | """
Copyright 2016 Deepgram
| you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
###############################################################################
from __future__ import print_function
import sys
###############################################################################
def error_message(msg):
""" Prints an error message and exits.
"""
line_width = 60
format_spec = '{{: ^{width}}}'.format(width=line_width)
lines = [
'', '',
'='*line_width, '',
'ERROR', '',
msg, ''
'See our troubleshooting page to get started:', '',
'https://kur.deepgram.com/troubleshooting.html#installation', '',
'='*line_width, '',
"Uh, oh. There was an error. Look up there ^^^^ and you'll be",
'training awesome models in no time!'
]
for line in lines:
print(format_spec.format(line), file=sys.stderr)
sys.exit(1)
###############################################################################
# Fail fast: print the formatted banner and exit before any 3.4+-only code
# in the rest of setup.py runs.
if sys.version_info < (3, 4):
	error_message('Kur requires Python 3.4 or later.')
###############################################################################
# pylint: disable=wrong-import-position
import os
from setuptools import setup, find_packages
# pylint: enable=wrong-import-position
################################################################################
def readme():
	""" Returns the long description extracted from README.rst.

		Only the region between the ``.. package_readme_starts_here`` and
		``.. package_readme_ends_here`` markers is kept (when present), and
		any paragraph immediately preceded by a ``.. package_readme_ignore``
		directive is dropped from the result.
	"""
	with open('README.rst', 'rb') as fh:
		text = fh.read().decode('utf-8')

	# Trim everything at and after the "ends here" marker.
	end_token = '.. package_readme_ends_here'
	end = text.find(end_token)
	if end >= 0:
		text = text[:end]

	# Trim everything up to and including the "starts here" marker.
	start_token = '.. package_readme_starts_here'
	start = text.find(start_token)
	if start >= 0:
		text = text[start + len(start_token):]

	kept = []
	drop_next = False
	for paragraph in text.split('\n\n'):
		if not paragraph:
			continue
		if paragraph.strip().startswith('.. package_readme_ignore'):
			drop_next = True
		elif drop_next:
			drop_next = False
		else:
			kept.append(paragraph)
	return '\n\n'.join(kept)
################################################################################
def get_version():
	""" Reads __version__ out of kur/version.py without importing the package.

		# Raises

		ValueError: if no line assigning __version__ is found.
	"""
	version_py = os.path.join(os.path.dirname(__file__), 'kur', 'version.py')
	with open(version_py, 'r') as fh:
		for line in fh:
			if not line.startswith('__version__'):
				continue
			return line.split('=')[-1].strip().replace('"', '')
	raise ValueError('Failed to parse version from: {}'.format(version_py))
################################################################################
# Build/packaging metadata for the `kur` distribution; consumed by setuptools.
setup(
	# Package information
	name='kur',
	version=get_version(),
	description='Descriptive deep learning',
	long_description=readme(),
	keywords='deep learning',
	# No trove classifiers are declared.
	classifiers=[
	],
	# Author information
	url='https://github.com/deepgram/kur',
	author='Adam Sypniewski',
	author_email='[email protected]',
	license='Apache Software License '
		'(http://www.apache.org/licenses/LICENSE-2.0)',
	# What is packaged here.
	packages=find_packages(),
	# What to include.
	package_data={
		'': ['*.txt', '*.rst', '*.md']
	},
	# Dependencies
	install_requires=[
		'pyyaml>=3.12',
		'jinja2>=2.8',
		'numpy>=1.11.2',
		'tqdm>=4.10.0',
		# Keras - the default backend (with Theano)
		'keras>=1.2.2',
		'theano>=0.8.2',
		'scipy>=0.18.1',
		'python-magic>=0.4.12',
		'pydub>=0.16.6',
		'python_speech_features>=0.4',
		'matplotlib>=1.5.3'
	],
	# No out-of-index package sources are required.
	dependency_links=[
	],
	# Testing
	test_suite='tests',
	tests_require=[
		'pytest',
		'tensorflow'
	],
	setup_requires=['pytest-runner'],
	# Installs the `kur` command-line entry point.
	entry_points={
		'console_scripts' : ['kur=kur.__main__:main']
	},
	zip_safe=False
)
#### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF | Licensed under the Apache License, Version 2.0 (the "License"); |
yolov3_to_onnx.py | #!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from collections import OrderedDict
import sys
import os
import onnx
from onnx import helper
from onnx import TensorProto
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], os.path.pardir))
from downloader import getFilePath
class DarkNetParser(object):
    """Definition of a parser for DarkNet-based YOLOv3-608 (only tested for this topology)."""

    def __init__(self, supported_layers):
        """Initializes a DarkNetParser object.

        Keyword argument:
        supported_layers -- a string list of supported layers in DarkNet naming convention,
        parameters are only added to the class dictionary if a parsed layer is included.
        """

        # A list of YOLOv3 layers containing dictionaries with all layer
        # parameters:
        self.layer_configs = OrderedDict()
        self.supported_layers = supported_layers
        # Running counter used to give every layer a zero-padded ordinal
        # prefix (e.g. '001_convolutional').
        self.layer_counter = 0

    def parse_cfg_file(self, cfg_file_path):
        """Takes the yolov3.cfg file and parses it layer by layer,
        appending each layer's parameters as a dictionary to layer_configs.

        Keyword argument:
        cfg_file_path -- path to the yolov3.cfg file as string
        """
        with open(cfg_file_path) as cfg_file:
            remainder = cfg_file.read()
            # _next_layer returns (None, None, None) once no further
            # '['-delimited block exists, which terminates the loop.
            while remainder is not None:
                layer_dict, layer_name, remainder = self._next_layer(remainder)
                if layer_dict is not None:
                    self.layer_configs[layer_name] = layer_dict
        return self.layer_configs

    def _next_layer(self, remainder):
        """Takes in a string and segments it by looking for DarkNet delimiters.
        Returns the layer parameters and the remaining string after the last delimiter.

        Example for the first Conv layer in yolo.cfg ...

        [convolutional]
        batch_normalize=1
        filters=32
        size=3
        stride=1
        pad=1
        activation=leaky

        ... becomes the following layer_dict return value:
        {'activation': 'leaky', 'stride': 1, 'pad': 1, 'filters': 32,
        'batch_normalize': 1, 'type': 'convolutional', 'size': 3}.

        '001_convolutional' is returned as layer_name, and all lines that follow in yolo.cfg
        are returned as the next remainder.

        Keyword argument:
        remainder -- a string with all raw text after the previously parsed layer
        """
        remainder = remainder.split('[', 1)
        if len(remainder) == 2:
            remainder = remainder[1]
        else:
            return None, None, None
        remainder = remainder.split(']', 1)
        if len(remainder) == 2:
            layer_type, remainder = remainder
        else:
            return None, None, None
        # Skip a single commented-out line directly after the section header
        # ('#' is the first non-space character).
        if remainder.replace(' ', '')[0] == '#':
            remainder = remainder.split('\n', 1)[1]

        # A blank line separates one layer's parameter block from the next.
        layer_param_block, remainder = remainder.split('\n\n', 1)
        layer_param_lines = layer_param_block.split('\n')[1:]
        layer_name = str(self.layer_counter).zfill(3) + '_' + layer_type
        layer_dict = dict(type=layer_type)
        if layer_type in self.supported_layers:
            for param_line in layer_param_lines:
                if param_line[0] == '#':
                    continue
                param_type, param_value = self._parse_params(param_line)
                layer_dict[param_type] = param_value
        self.layer_counter += 1
        return layer_dict, layer_name, remainder

    def _parse_params(self, param_line):
        """Identifies the parameters contained in one of the cfg file and returns
        them in the required format for each parameter type, e.g. as a list, an int or a float.

        Keyword argument:
        param_line -- one parsed line within a layer block
        """
        param_line = param_line.replace(' ', '')
        param_type, param_value_raw = param_line.split('=')
        param_value = None
        if param_type == 'layers':
            # Route layers list one or more (possibly negative) layer indexes.
            layer_indexes = list()
            for index in param_value_raw.split(','):
                layer_indexes.append(int(index))
            param_value = layer_indexes
        elif isinstance(param_value_raw, str) and not param_value_raw.isalpha():
            # Numeric value: int when it parses as one, float otherwise.
            condition_param_value_positive = param_value_raw.isdigit()
            condition_param_value_negative = param_value_raw[0] == '-' and \
                param_value_raw[1:].isdigit()
            if condition_param_value_positive or condition_param_value_negative:
                param_value = int(param_value_raw)
            else:
                param_value = float(param_value_raw)
        else:
            # Alphabetic values (e.g. activation names) stay strings.
            param_value = str(param_value_raw)
        return param_type, param_value
class MajorNodeSpecs(object):
    """Records the ONNX output name and channel count of a DarkNet layer.

    Some DarkNet layers never become ONNX nodes; they are still tracked
    (with created_onnx_node left False) so that skip connections can be
    resolved by index later on.
    """

    def __init__(self, name, channels):
        """Store the node name and channel count.

        Keyword arguments:
        name -- name of the ONNX node
        channels -- number of output channels of this node
        """
        self.name = name
        self.channels = channels
        # The node counts as "created" only with a usable name and a
        # positive, integral channel count.
        self.created_onnx_node = bool(
            name is not None and isinstance(channels, int) and channels > 0)
class ConvParams(object):
    """Hyper-parameters of one DarkNet convolutional layer.

    Holds the ONNX node-name prefix, the batch-normalization flag and the
    expected convolution weight dimensions, and generates validated tensor
    names for the layer's weights.
    """

    def __init__(self, node_name, batch_normalize, conv_weight_dims):
        """Constructor based on the base node name (e.g. 101_convolutional), the batch
        normalization setting, and the convolutional weights shape.

        Keyword arguments:
        node_name -- base name of this YOLO convolutional layer
        batch_normalize -- bool value if batch normalization is used
        conv_weight_dims -- the dimensions of this layer's convolutional weights
        """
        self.node_name = node_name
        self.batch_normalize = batch_normalize
        # Convolution weights are always 4-D: (out, in, h, w).
        assert len(conv_weight_dims) == 4
        self.conv_weight_dims = conv_weight_dims

    def generate_param_name(self, param_category, suffix):
        """Build "<node>_<category>_<suffix>", asserting the combination is valid."""
        assert suffix
        assert param_category in ('bn', 'conv')
        assert suffix in ('scale', 'mean', 'var', 'weights', 'bias')
        if param_category == 'bn':
            # Batch-norm tensors only exist when the layer uses batch norm.
            assert self.batch_normalize
            assert suffix in ('scale', 'bias', 'mean', 'var')
        else:  # param_category == 'conv'
            assert suffix in ('weights', 'bias')
            if suffix == 'bias':
                # With batch norm, the bias lives in the BN tensors instead.
                assert not self.batch_normalize
        return '_'.join((self.node_name, param_category, suffix))
class ResizeParams(object):
    """Stores the scale input tensor for an ONNX Resize node."""

    def __init__(self, node_name, value):
        """Constructor based on the base node name (e.g. 86_Resize),
        and the value of the scale input tensor.

        Keyword arguments:
        node_name -- base name of this YOLO Resize layer
        value -- the value of the scale input to the Resize layer as numpy array
        """
        self.node_name = node_name
        self.value = value

    def generate_param_name(self):
        """Return the name used for this Resize node's scale input."""
        return self.node_name + '_scale'

    def generate_roi_name(self):
        """Return the name used for this Resize node's roi input."""
        return self.node_name + '_roi'
class WeightLoader(object):
    """Helper class used for loading the serialized weights of a binary file stream
    and returning the initializers and the input tensors required for populating
    the ONNX graph with weights.

    Fixes over the original: np.product (removed in NumPy 2.0) is replaced by
    np.prod, and a close() method is provided so the weights file handle does
    not have to wait for garbage collection.
    """

    def __init__(self, weights_file_path):
        """Initialized with a path to the YOLOv3 .weights file.

        Keyword argument:
        weights_file_path -- path to the weights file.
        """
        self.weights_file = self._open_weights_file(weights_file_path)

    def close(self):
        """Close the underlying weights file (safe to call more than once)."""
        if self.weights_file is not None:
            self.weights_file.close()
            self.weights_file = None

    def load_resize_scales(self, resize_params):
        """Returns the initializers with the value of the scale input
        tensor given by resize_params.

        Keyword argument:
        resize_params -- a ResizeParams object
        """
        initializer = list()
        inputs = list()
        name = resize_params.generate_param_name()
        shape = resize_params.value.shape
        data = resize_params.value
        scale_init = helper.make_tensor(
            name, TensorProto.FLOAT, shape, data)
        scale_input = helper.make_tensor_value_info(
            name, TensorProto.FLOAT, shape)
        initializer.append(scale_init)
        inputs.append(scale_input)

        # In opset 11 an additional input named roi is required. Create a dummy tensor to satisfy this.
        # It is a 1D tensor of size of the rank of the input (4)
        rank = 4
        roi_name = resize_params.generate_roi_name()
        roi_input = helper.make_tensor_value_info(roi_name, TensorProto.FLOAT, [rank])
        roi_init = helper.make_tensor(roi_name, TensorProto.FLOAT, [rank], [0, 0, 0, 0])
        initializer.append(roi_init)
        inputs.append(roi_input)

        return initializer, inputs

    def load_conv_weights(self, conv_params):
        """Returns the initializers with weights from the weights file and
        the input tensors of a convolutional layer for all corresponding ONNX nodes.

        Keyword argument:
        conv_params -- a ConvParams object
        """
        initializer = list()
        inputs = list()
        if conv_params.batch_normalize:
            bias_init, bias_input = self._create_param_tensors(
                conv_params, 'bn', 'bias')
            bn_scale_init, bn_scale_input = self._create_param_tensors(
                conv_params, 'bn', 'scale')
            bn_mean_init, bn_mean_input = self._create_param_tensors(
                conv_params, 'bn', 'mean')
            bn_var_init, bn_var_input = self._create_param_tensors(
                conv_params, 'bn', 'var')
            initializer.extend(
                [bn_scale_init, bias_init, bn_mean_init, bn_var_init])
            inputs.extend([bn_scale_input, bias_input,
                           bn_mean_input, bn_var_input])
        else:
            bias_init, bias_input = self._create_param_tensors(
                conv_params, 'conv', 'bias')
            initializer.append(bias_init)
            inputs.append(bias_input)
        conv_init, conv_input = self._create_param_tensors(
            conv_params, 'conv', 'weights')
        initializer.append(conv_init)
        inputs.append(conv_input)
        return initializer, inputs

    def _open_weights_file(self, weights_file_path):
        """Opens a YOLOv3 DarkNet file stream and skips the header.

        Keyword argument:
        weights_file_path -- path to the weights file.
        """
        weights_file = open(weights_file_path, 'rb')
        # The DarkNet weights file starts with five int32 header values
        # (version info and a seen-images counter) that are not weights.
        length_header = 5
        np.ndarray(
            shape=(length_header, ), dtype='int32', buffer=weights_file.read(
                length_header * 4))
        return weights_file

    def _create_param_tensors(self, conv_params, param_category, suffix):
        """Creates the initializers with weights from the weights file together with
        the input tensors.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)

        initializer_tensor = helper.make_tensor(
            param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(
            param_name, TensorProto.FLOAT, param_data_shape)
        return initializer_tensor, input_tensor

    def _load_one_param_type(self, conv_params, param_category, suffix):
        """Deserializes the weights from a file stream in the DarkNet order.

        Keyword arguments:
        conv_params -- a ConvParams object
        param_category -- the category of parameters to be created ('bn' or 'conv')
        suffix -- a string determining the sub-type of above param_category (e.g.,
        'weights' or 'bias')
        """
        param_name = conv_params.generate_param_name(param_category, suffix)
        channels_out, channels_in, filter_h, filter_w = conv_params.conv_weight_dims
        if param_category == 'bn':
            param_shape = [channels_out]
        elif param_category == 'conv':
            if suffix == 'weights':
                param_shape = [channels_out, channels_in, filter_h, filter_w]
            elif suffix == 'bias':
                param_shape = [channels_out]
        # np.product was removed in NumPy 2.0; np.prod is the supported
        # spelling, and int() keeps the element count a plain Python int.
        param_size = int(np.prod(np.array(param_shape)))
        param_data = np.ndarray(
            shape=param_shape,
            dtype='float32',
            buffer=self.weights_file.read(param_size * 4))
        param_data = param_data.flatten().astype(float)
        return param_name, param_data, param_shape
class GraphBuilderONNX(object):
    """Class for creating an ONNX graph from a previously generated list of layer dictionaries."""

    def __init__(self, output_tensors):
        """Initialize with all DarkNet default parameters used creating YOLOv3,
        and specify the output tensors as an OrderedDict for their output dimensions
        with their names as keys.

        Keyword argument:
        output_tensors -- the output tensors as an OrderedDict containing the keys'
        output dimensions
        """
        self.output_tensors = output_tensors
        self._nodes = list()
        self.graph_def = None
        self.input_tensor = None
        # DarkNet defaults for batch norm and LeakyReLU.
        self.epsilon_bn = 1e-5
        self.momentum_bn = 0.99
        self.alpha_lrelu = 0.1
        # Maps layer name -> ConvParams/ResizeParams; consumed when weights
        # are loaded in build_onnx_graph.
        self.param_dict = OrderedDict()
        self.major_node_specs = list()
        # Overwritten by the 'net' layer's batch parameter.
        self.batch_size = 1

    def build_onnx_graph(
            self,
            layer_configs,
            weights_file_path,
            verbose=True):
        """Iterate over all layer configs (parsed from the DarkNet representation
        of YOLOv3-608), create an ONNX graph, populate it with weights from the weights
        file and return the graph definition.

        Keyword arguments:
        layer_configs -- an OrderedDict object with all parsed layers' configurations
        weights_file_path -- location of the weights file
        verbose -- toggles if the graph is printed after creation (default: True)
        """
        for layer_name in layer_configs.keys():
            layer_dict = layer_configs[layer_name]
            major_node_specs = self._make_onnx_node(layer_name, layer_dict)
            if major_node_specs.name is not None:
                self.major_node_specs.append(major_node_specs)
        outputs = list()
        for tensor_name in self.output_tensors.keys():
            # Prepend the batch dimension to the configured CHW shape.
            output_dims = [self.batch_size, ] + \
                self.output_tensors[tensor_name]
            output_tensor = helper.make_tensor_value_info(
                tensor_name, TensorProto.FLOAT, output_dims)
            outputs.append(output_tensor)
        inputs = [self.input_tensor]
        weight_loader = WeightLoader(weights_file_path)
        initializer = list()
        # If a layer has parameters, add them to the initializer and input lists.
        for layer_name in self.param_dict.keys():
            _, layer_type = layer_name.split('_', 1)
            params = self.param_dict[layer_name]
            if layer_type == 'convolutional':
                initializer_layer, inputs_layer = weight_loader.load_conv_weights(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
            elif layer_type == "upsample":
                initializer_layer, inputs_layer = weight_loader.load_resize_scales(
                    params)
                initializer.extend(initializer_layer)
                inputs.extend(inputs_layer)
        del weight_loader
        self.graph_def = helper.make_graph(
            nodes=self._nodes,
            name='YOLOv3-608',
            inputs=inputs,
            outputs=outputs,
            initializer=initializer
        )
        if verbose:
            print(helper.printable_graph(self.graph_def))
        model_def = helper.make_model(self.graph_def,
                                      producer_name='NVIDIA TensorRT sample')
        return model_def

    def _make_onnx_node(self, layer_name, layer_dict):
        """Take in a layer parameter dictionary, choose the correct function for
        creating an ONNX node and store the information important to graph creation
        as a MajorNodeSpec object.

        Keyword arguments:
        layer_name -- the layer's name (also the corresponding key in layer_configs)
        layer_dict -- a layer parameter dictionary (one element of layer_configs)
        """
        layer_type = layer_dict['type']
        # The very first layer must be 'net': it defines the graph input.
        if self.input_tensor is None:
            if layer_type == 'net':
                major_node_output_name, major_node_output_channels = self._make_input_tensor(
                    layer_name, layer_dict)
                major_node_specs = MajorNodeSpecs(major_node_output_name,
                                                  major_node_output_channels)
            else:
                raise ValueError('The first node has to be of type "net".')
        else:
            node_creators = dict()
            node_creators['convolutional'] = self._make_conv_node
            node_creators['shortcut'] = self._make_shortcut_node
            node_creators['route'] = self._make_route_node
            node_creators['upsample'] = self._make_resize_node

            if layer_type in node_creators.keys():
                major_node_output_name, major_node_output_channels = \
                    node_creators[layer_type](layer_name, layer_dict)
                major_node_specs = MajorNodeSpecs(major_node_output_name,
                                                  major_node_output_channels)
            else:
                # Unsupported layers (e.g. 'yolo') are tracked with a dummy
                # spec so skip-connection indexes still line up.
                print(
                    'Layer of type %s not supported, skipping ONNX node generation.' %
                    layer_type)
                major_node_specs = MajorNodeSpecs(layer_name,
                                                  None)
        return major_node_specs

    def _make_input_tensor(self, layer_name, layer_dict):
        """Create an ONNX input tensor from a 'net' layer and store the batch size.

        Keyword arguments:
        layer_name -- the layer's name (also the corresponding key in layer_configs)
        layer_dict -- a layer parameter dictionary (one element of layer_configs)
        """
        batch_size = layer_dict['batch']
        channels = layer_dict['channels']
        height = layer_dict['height']
        width = layer_dict['width']
        self.batch_size = batch_size
        input_tensor = helper.make_tensor_value_info(
            str(layer_name), TensorProto.FLOAT, [
                batch_size, channels, height, width])
        self.input_tensor = input_tensor
        return layer_name, channels

    def _get_previous_node_specs(self, target_index=-1):
        """Get a previously generated ONNX node (skip those that were not generated).
        Target index can be passed for jumping to a specific index.

        Keyword arguments:
        target_index -- optional for jumping to a specific index (default: -1 for jumping
        to previous element)
        """
        # Walk backwards from target_index until a node that actually exists
        # in the ONNX graph is found.
        previous_node = None
        for node in self.major_node_specs[target_index::-1]:
            if node.created_onnx_node:
                previous_node = node
                break
        assert previous_node is not None
        return previous_node

    def _make_conv_node(self, layer_name, layer_dict):
        """Create an ONNX Conv node with optional batch normalization and
        activation nodes.

        Keyword arguments:
        layer_name -- the layer's name (also the corresponding key in layer_configs)
        layer_dict -- a layer parameter dictionary (one element of layer_configs)
        """
        previous_node_specs = self._get_previous_node_specs()
        inputs = [previous_node_specs.name]
        previous_channels = previous_node_specs.channels
        kernel_size = layer_dict['size']
        stride = layer_dict['stride']
        filters = layer_dict['filters']
        batch_normalize = False
        if 'batch_normalize' in layer_dict.keys(
        ) and layer_dict['batch_normalize'] == 1:
            batch_normalize = True

        kernel_shape = [kernel_size, kernel_size]
        weights_shape = [filters, previous_channels] + kernel_shape
        conv_params = ConvParams(layer_name, batch_normalize, weights_shape)

        strides = [stride, stride]
        dilations = [1, 1]
        weights_name = conv_params.generate_param_name('conv', 'weights')
        inputs.append(weights_name)
        # With batch norm, the bias is carried by the BN node instead.
        if not batch_normalize:
            bias_name = conv_params.generate_param_name('conv', 'bias')
            inputs.append(bias_name)

        conv_node = helper.make_node(
            'Conv',
            inputs=inputs,
            outputs=[layer_name],
            kernel_shape=kernel_shape,
            strides=strides,
            auto_pad='SAME_LOWER',
            dilations=dilations,
            name=layer_name
        )
        self._nodes.append(conv_node)
        inputs = [layer_name]
        layer_name_output = layer_name
        if batch_normalize:
            layer_name_bn = layer_name + '_bn'
            bn_param_suffixes = ['scale', 'bias', 'mean', 'var']
            for suffix in bn_param_suffixes:
                bn_param_name = conv_params.generate_param_name('bn', suffix)
                inputs.append(bn_param_name)
            batchnorm_node = helper.make_node(
                'BatchNormalization',
                inputs=inputs,
                outputs=[layer_name_bn],
                epsilon=self.epsilon_bn,
                momentum=self.momentum_bn,
                name=layer_name_bn
            )
            self._nodes.append(batchnorm_node)
            inputs = [layer_name_bn]
            layer_name_output = layer_name_bn
        if layer_dict['activation'] == 'leaky':
            layer_name_lrelu = layer_name + '_lrelu'

            lrelu_node = helper.make_node(
                'LeakyRelu',
                inputs=inputs,
                outputs=[layer_name_lrelu],
                name=layer_name_lrelu,
                alpha=self.alpha_lrelu
            )
            self._nodes.append(lrelu_node)
            inputs = [layer_name_lrelu]
            layer_name_output = layer_name_lrelu
        elif layer_dict['activation'] == 'linear':
            pass
        else:
            print('Activation not supported.')

        self.param_dict[layer_name] = conv_params
        return layer_name_output, filters

    def _make_shortcut_node(self, layer_name, layer_dict):
        """Create an ONNX Add node with the shortcut properties from
        the DarkNet-based graph.

        Keyword arguments:
        layer_name -- the layer's name (also the corresponding key in layer_configs)
        layer_dict -- a layer parameter dictionary (one element of layer_configs)
        """
        shortcut_index = layer_dict['from']
        activation = layer_dict['activation']
        assert activation == 'linear'

        first_node_specs = self._get_previous_node_specs()
        second_node_specs = self._get_previous_node_specs(
            target_index=shortcut_index)
        # Residual additions require matching channel counts.
        assert first_node_specs.channels == second_node_specs.channels
        channels = first_node_specs.channels
        inputs = [first_node_specs.name, second_node_specs.name]
        shortcut_node = helper.make_node(
            'Add',
            inputs=inputs,
            outputs=[layer_name],
            name=layer_name,
        )
        self._nodes.append(shortcut_node)
        return layer_name, channels

    def _make_route_node(self, layer_name, layer_dict):
        """If the 'layers' parameter from the DarkNet configuration is only one index, continue
        node creation at the indicated (negative) index. Otherwise, create an ONNX Concat node
        with the route properties from the DarkNet-based graph.

        Keyword arguments:
        layer_name -- the layer's name (also the corresponding key in layer_configs)
        layer_dict -- a layer parameter dictionary (one element of layer_configs)
        """
        route_node_indexes = layer_dict['layers']
        if len(route_node_indexes) == 1:
            split_index = route_node_indexes[0]
            assert split_index < 0
            # Increment by one because we skipped the YOLO layer:
            split_index += 1
            # Rewind the node list: subsequent layers continue from there.
            self.major_node_specs = self.major_node_specs[:split_index]
            layer_name = None
            channels = None
        else:
            inputs = list()
            channels = 0
            for index in route_node_indexes:
                if index > 0:
                    # Increment by one because we count the input as a node (DarkNet
                    # does not)
                    index += 1
                route_node_specs = self._get_previous_node_specs(
                    target_index=index)
                inputs.append(route_node_specs.name)
                channels += route_node_specs.channels
            assert inputs
            assert channels > 0

            route_node = helper.make_node(
                'Concat',
                axis=1,
                inputs=inputs,
                outputs=[layer_name],
                name=layer_name,
            )
            self._nodes.append(route_node)
        return layer_name, channels

    def _make_resize_node(self, layer_name, layer_dict):
        """Create an ONNX Resize node with the properties from
        the DarkNet-based graph.

        Keyword arguments:
        layer_name -- the layer's name (also the corresponding key in layer_configs)
        layer_dict -- a layer parameter dictionary (one element of layer_configs)
        """
        resize_scale_factors = float(layer_dict['stride'])
        # Create the scale factor array with node parameters
        scales = np.array([1.0, 1.0, resize_scale_factors, resize_scale_factors]).astype(np.float32)
        previous_node_specs = self._get_previous_node_specs()
        inputs = [previous_node_specs.name]

        channels = previous_node_specs.channels
        assert channels > 0
        resize_params = ResizeParams(layer_name, scales)

        # roi input is the second input, so append it before scales
        roi_name = resize_params.generate_roi_name()
        inputs.append(roi_name)

        scales_name = resize_params.generate_param_name()
        inputs.append(scales_name)

        resize_node = helper.make_node(
            'Resize',
            coordinate_transformation_mode='asymmetric',
            mode='nearest',
            nearest_mode='floor',
            inputs=inputs,
            outputs=[layer_name],
            name=layer_name,
        )
        self._nodes.append(resize_node)
        self.param_dict[layer_name] = resize_params
        return layer_name, channels
def main():
    """Run the DarkNet-to-ONNX conversion for YOLOv3-608.

    Downloads/locates yolov3.cfg and yolov3.weights, parses the cfg into
    layer configurations, builds the ONNX graph with weights attached,
    validates it with the ONNX checker, and writes yolov3.onnx.
    """
    cfg_file_path = getFilePath('samples/python/yolov3_onnx/yolov3.cfg')

    # These are the only layers DarkNetParser will extract parameters from. The three layers of
    # type 'yolo' are not parsed in detail because they are included in the post-processing later:
    supported_layers = ['net', 'convolutional', 'shortcut',
                        'route', 'upsample']

    # Create a DarkNetParser object, and the use it to generate an OrderedDict with all
    # layer's configs from the cfg file:
    parser = DarkNetParser(supported_layers)
    layer_configs = parser.parse_cfg_file(cfg_file_path)
    # We do not need the parser anymore after we got layer_configs:
    del parser

    # In above layer_config, there are three outputs that we need to know the output
    # shape of (in CHW format):
    output_tensor_dims = OrderedDict()
    output_tensor_dims['082_convolutional'] = [255, 19, 19]
    output_tensor_dims['094_convolutional'] = [255, 38, 38]
    output_tensor_dims['106_convolutional'] = [255, 76, 76]

    # Create a GraphBuilderONNX object with the known output tensor dimensions:
    builder = GraphBuilderONNX(output_tensor_dims)

    weights_file_path = getFilePath('samples/python/yolov3_onnx/yolov3.weights')

    # Now generate an ONNX graph with weights from the previously parsed layer configurations
    # and the weights file:
    yolov3_model_def = builder.build_onnx_graph(
        layer_configs=layer_configs,
        weights_file_path=weights_file_path,
        verbose=True)
    # Once we have the model definition, we do not need the builder anymore:
    del builder

    # Perform a sanity check on the ONNX model definition:
    onnx.checker.check_model(yolov3_model_def)

    # Serialize the generated ONNX graph to this file:
    output_file_path = 'yolov3.onnx'
    onnx.save(yolov3_model_def, output_file_path)
| __init__ |
curse_forge.py | from .base import DownloadSource
import requests
from mod_updater.util.utils import TermColors
class CurseForgeSource(DownloadSource):
    """Downloads a mod file from CurseForge via the api.cfwidget.com API.

    Fixes over the original: HTTP requests now carry a timeout and check the
    response status (a CurseForge error page previously surfaced as a
    confusing KeyError), the match filtering is a plain list comprehension,
    and the failure message states what was being looked for.
    """

    REQUIRED_ARGS = ["project_id"]
    OPTIONAL_ARGS = [("release_type", "release")]

    def __init__(self, project, release_type, path, mc_version):
        """Store the project slug, desired release type ('release'/'beta'/...),
        destination directory, and target Minecraft version.
        """
        self._file_data_url = f"https://api.cfwidget.com/minecraft/mc-mods/{project}"
        self._release_type = release_type
        self._path = path
        self._mc_version = mc_version

    def download(self):
        """Resolve the first matching file and download it into self._path.

        Raises:
            Exception: when no file matches the release type and MC version.
            requests.HTTPError: when either HTTP request fails.
        """
        r = requests.get(self._file_data_url, timeout=30)
        r.raise_for_status()
        files = r.json()["files"]

        candidates = [
            f for f in files
            if f["type"] == self._release_type and self._mc_version in f["versions"]
        ]
        if not candidates:
            raise Exception(
                f"failed to find a {self._release_type} file for Minecraft "
                f"{self._mc_version} at {self._file_data_url}")
        if len(candidates) > 1:
            print(f"{TermColors.WARNING}WARNING, multiple curse forge files found, choosing the first one{TermColors.ENDC}")
        target = candidates[0]

        file_id = str(target["id"])
        asset_name = str(target["name"])
        # The CurseForge CDN splits the numeric file id after the fourth digit.
        download_url = f"https://media.forgecdn.net/files/{file_id[0:4]}/{file_id[4:]}/{asset_name}"
        download_response = requests.get(download_url, stream=True, timeout=30)
        download_response.raise_for_status()
        self.download_file(download_response, f"{self._path}/{asset_name}")

    @classmethod
    def parse(cls, json, path, mc_version):
        """Build a CurseForgeSource from a JSON config entry, validating
        required arguments and filling in declared defaults.
        """
        for arg in cls.REQUIRED_ARGS:
            if arg not in json:
                raise Exception("Missing required arg ", arg, "in ", json)
        for (arg, default) in cls.OPTIONAL_ARGS:
            if arg not in json:
                json[arg] = default
        project = json["project_id"]
        release_type = json["release_type"]
        return cls(project, release_type, path, mc_version)
|
erlang.js | module.exports = function(hljs) {
var BASIC_ATOM_RE = '[a-z\'][a-zA-Z0-9_\']*';
var FUNCTION_NAME_RE = '(' + BASIC_ATOM_RE + ':' + BASIC_ATOM_RE + '|' + BASIC_ATOM_RE + ')';
var ERLANG_RESERVED = {
keyword:
'after and andalso|10 band begin bnot bor bsl bzr bxor case catch cond div end fun let ' +
'not of orelse|10 query receive rem try when xor',
literal:
'false true'
};
var COMMENT = {
className: 'comment',
begin: '%', end: '$',
relevance: 0
};
var NUMBER = {
className: 'number',
begin: '\\b(\\d+#[a-fA-F0-9]+|\\d+(\\.\\d+)?([eE][-+]?\\d+)?)',
relevance: 0
};
var NAMED_FUN = {
begin: 'fun\\s+' + BASIC_ATOM_RE + '/\\d+'
};
var FUNCTION_CALL = {
begin: FUNCTION_NAME_RE + '\\(', end: '\\)',
returnBegin: true,
relevance: 0,
contains: [
{
className: 'function_name', begin: FUNCTION_NAME_RE,
relevance: 0
},
{
begin: '\\(', end: '\\)', endsWithParent: true,
returnEnd: true,
relevance: 0
// "contains" defined later
}
]
};
var TUPLE = {
className: 'tuple',
begin: '{', end: '}',
relevance: 0
// "contains" defined later
};
var VAR1 = {
className: 'variable',
begin: '\\b_([A-Z][A-Za-z0-9_]*)?',
relevance: 0
};
var VAR2 = {
className: 'variable',
begin: '[A-Z][a-zA-Z0-9_]*',
relevance: 0
};
var RECORD_ACCESS = {
begin: '#', end: '}', | {
className: 'record_name',
begin: '#' + hljs.UNDERSCORE_IDENT_RE,
relevance: 0
},
{
begin: '{', endsWithParent: true,
relevance: 0
// "contains" defined later
}
]
};
var BLOCK_STATEMENTS = {
keywords: ERLANG_RESERVED,
begin: '(fun|receive|if|try|case)', end: 'end'
};
BLOCK_STATEMENTS.contains = [
COMMENT,
NAMED_FUN,
hljs.inherit(hljs.APOS_STRING_MODE, {className: ''}),
BLOCK_STATEMENTS,
FUNCTION_CALL,
hljs.QUOTE_STRING_MODE,
NUMBER,
TUPLE,
VAR1, VAR2,
RECORD_ACCESS
];
var BASIC_MODES = [
COMMENT,
NAMED_FUN,
BLOCK_STATEMENTS,
FUNCTION_CALL,
hljs.QUOTE_STRING_MODE,
NUMBER,
TUPLE,
VAR1, VAR2,
RECORD_ACCESS
];
FUNCTION_CALL.contains[1].contains = BASIC_MODES;
TUPLE.contains = BASIC_MODES;
RECORD_ACCESS.contains[1].contains = BASIC_MODES;
var PARAMS = {
className: 'params',
begin: '\\(', end: '\\)',
contains: BASIC_MODES
};
return {
keywords: ERLANG_RESERVED,
illegal: '(</|\\*=|\\+=|-=|/=|/\\*|\\*/|\\(\\*|\\*\\))',
contains: [
{
className: 'function',
begin: '^' + BASIC_ATOM_RE + '\\s*\\(', end: '->',
returnBegin: true,
illegal: '\\(|#|//|/\\*|\\\\|:',
contains: [
PARAMS,
{
className: 'title', begin: BASIC_ATOM_RE
}
],
starts: {
end: ';|\\.',
keywords: ERLANG_RESERVED,
contains: BASIC_MODES
}
},
COMMENT,
{
className: 'pp',
begin: '^-', end: '\\.',
relevance: 0,
excludeEnd: true,
returnBegin: true,
lexems: '-' + hljs.IDENT_RE,
keywords:
'-module -record -undef -export -ifdef -ifndef -author -copyright -doc -vsn ' +
'-import -include -include_lib -compile -define -else -endif -file -behaviour ' +
'-behavior',
contains: [PARAMS]
},
NUMBER,
hljs.QUOTE_STRING_MODE,
RECORD_ACCESS,
VAR1, VAR2,
TUPLE
]
};
}; | illegal: '.',
relevance: 0,
returnBegin: true,
contains: [ |
__init__.py | # flake8: noqa
| from yeelight.main import Bulb, BulbException, discover_bulbs
from yeelight.version import __version__ | """A Python library for controlling YeeLight RGB bulbs."""
from yeelight.enums import BulbType, CronType, LightType, PowerMode, SceneClass
from yeelight.flow import Flow, HSVTransition, RGBTransition, SleepTransition, TemperatureTransition |
timestampGrabber.js | // Generated by CoffeeScript 1.7.1
(function() {
var ContinentsLoaded, Timestamp, constructPattern, getPatternPositions, parse, parseDate, sugar, tz, _, _MONTHS3_ENG, _patterns,
__indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; };
sugar = require('sugar');
_ = require('underscore');
tz = require("timezone");
Timestamp = module.exports;
ContinentsLoaded = {};
_MONTHS3_ENG = ["jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"];
_patterns = {
Y: "([0-9]{2,4})",
M: "([0-9]{1,2})",
MMM: "([\\w]{3})",
D: "([0-9]{1,2})",
H: "([0-9]{1,2})",
m: "([0-9]{1,2})",
s: "(?:[:]([0-9]{1,2}))?",
date_date: "[-|//|\\s|,]*",
time_time: "[:]",
date_time: "[T|\\s]",
undefined: "[-|//|\\s|,]*",
f: "(\.[0-9]{1,3})?",
t: "(?:[\\s])?([\\w]{2})?",
o: "([\-|\+][0-9]{1,4})",
w: "([\\w]*)"
};
getPatternPositions = function(pattern) {
var group, i, patternSplit, positions, _i, _ref;
patternSplit = pattern.split(/[/ /]|[-]|[T]|[\s]+|[\.]|[\s]*[,][\s]*|[:]/);
positions = [];
for (i = _i = 0, _ref = patternSplit.length - 1; 0 <= _ref ? _i <= _ref : _i >= _ref; i = 0 <= _ref ? ++_i : --_i) {
group = void 0;
switch (patternSplit[i].charAt(0)) {
case "Y":
group = "date";
break;
case "M":
group = "date";
break;
case "D":
group = "date";
break;
case "H":
group = "time";
break;
case "m":
group = "time";
break;
case "s":
group = "time_optional";
break;
case "f":
group = "milli_optional";
break;
case "t":
group = "timeOfDay_optional";
break;
case "+":
case "-":
case "o":
group = "offset";
break;
case "w":
group = "word";
}
if (group === void 0) {
positions[i] = {
"type": patternSplit[i],
"len": patternSplit[i].length,
"group": group
};
} else {
if (patternSplit[i] === "MMM") {
positions[i] = {
"type": patternSplit[i],
"len": patternSplit[i].length,
"group": group
};
} else if (patternSplit[i].charAt(0) === "+" || patternSplit[i].charAt(0) === "-") {
positions[i] = {
"type": patternSplit[i].charAt(1),
"len": patternSplit[i].length,
"group": group
};
} else {
positions[i] = {
"type": patternSplit[i].charAt(0),
"len": patternSplit[i].length,
"group": group
};
}
}
}
return positions;
};
constructPattern = function(patternPositions) {
var NEUTRAL, groupCurr, groupNext, i, rString, _i, _ref;
rString = "";
NEUTRAL = ["offset", "word"];
for (i = _i = 0, _ref = patternPositions.length - 1; 0 <= _ref ? _i <= _ref : _i >= _ref; i = 0 <= _ref ? ++_i : --_i) {
if (_.isUndefined(patternPositions[i].group)) {
rString = rString + "(" + patternPositions[i].type + ")"; | } else {
rString = rString + _patterns[patternPositions[i].type];
}
if (i < patternPositions.length - 1) {
groupCurr = patternPositions[i].group;
groupNext = patternPositions[i + 1].group;
if (_.isUndefined(groupCurr) || _.isUndefined(groupNext) || __indexOf.call(NEUTRAL, groupNext) >= 0 || __indexOf.call(NEUTRAL, groupCurr) >= 0) {
rString = rString + _patterns["undefined"];
} else {
if (_patterns[groupCurr + "_" + groupNext]) {
rString = rString + _patterns[groupCurr + "_" + groupNext];
}
}
}
}
return new RegExp(rString);
};
parseDate = function(dateIsoish, pattern, strictMode) {
var amPm, constructed, dateIsoishCleanedArr, daytimeHoursToAdd, i, isOptionalPattern, j, paternPositions, r, typeShouldBe, wasOptional, yearXDigits, _i, _ref;
if (pattern == null) {
pattern = "YYYY/MM/DD HH:mm:ss.fff tt";
}
if (strictMode == null) {
strictMode = false;
}
paternPositions = getPatternPositions(pattern);
constructed = constructPattern(paternPositions);
dateIsoishCleanedArr = dateIsoish.match(constructed);
if (dateIsoishCleanedArr) {
dateIsoishCleanedArr.removeAt(0);
}
isOptionalPattern = function(patternString) {
return _.isUndefined(patternString) || _.isEqual(patternString.charAt(patternString.length - 1), "?") || !_.isEqual(patternString.charAt(0), "(");
};
r = {};
j = 1;
for (i = _i = 0, _ref = paternPositions.length - 1; 0 <= _ref ? _i <= _ref : _i >= _ref; i = 0 <= _ref ? ++_i : --_i) {
typeShouldBe = paternPositions[i].type;
wasOptional = false;
if (!isOptionalPattern(_patterns[typeShouldBe])) {
if (dateIsoishCleanedArr[i] === void 0) {
throw "datepattern " + pattern + " does not match date " + dateIsoish;
}
}
amPm = "";
if (dateIsoishCleanedArr[i] !== void 0) {
switch (typeShouldBe) {
case "f":
r[typeShouldBe] = parseFloat(dateIsoishCleanedArr[i]) * 1000;
break;
case "t":
if (dateIsoishCleanedArr[i].toLowerCase() === "pm") {
amPm = "pm";
} else {
amPm = "am";
}
break;
case "Y":
yearXDigits = parseInt(dateIsoishCleanedArr[i]);
if (yearXDigits < 100) {
yearXDigits = 2000 + yearXDigits;
}
r[typeShouldBe] = yearXDigits;
break;
case "MMM":
r[typeShouldBe] = _MONTHS3_ENG.indexOf(dateIsoishCleanedArr[i].toLowerCase()) + 1;
break;
case "M":
r[typeShouldBe] = parseInt(dateIsoishCleanedArr[i]);
break;
case "o":
r[typeShouldBe] = parseInt(dateIsoishCleanedArr[i]);
break;
default:
r[typeShouldBe] = parseInt(dateIsoishCleanedArr[i]);
}
}
}
daytimeHoursToAdd = 0;
if (amPm === "pm") {
if (r["H"] === 12) {
} else {
daytimeHoursToAdd = 12;
}
} else {
if (r["H"] === 12) {
daytimeHoursToAdd = -12;
}
}
r["H"] = r["H"] + daytimeHoursToAdd;
return r;
};
Timestamp.pad = function(n, width, z) {
if (z == null) {
z = "0";
}
n = n + '';
if (n.length >= width) {
return n;
} else {
return new Array(width - n.length + 1).join(z) + n;
}
};
parse = function(dateIsoish, pattern, continentCitiy, strictMode) {
var city, continent, continentCities, dateParsed, day, hour, milliSecond, minute, month, monthRaw, r, second, sign;
if (pattern == null) {
pattern = "YYYY/MM/DD HH:mm:ss.fff tt";
}
if (continentCitiy == null) {
continentCitiy = null;
}
if (strictMode == null) {
strictMode = false;
}
dateParsed = parseDate(dateIsoish, pattern, strictMode);
if (continentCitiy == null) {
if (dateParsed.o == null) {
throw new TypeError("either offset or continent city must be defined for timezone");
}
continent = "Etc";
sign = dateParsed.o > 0 ? "+" : "";
city = "GMT" + sign + dateParsed.o.toString();
} else {
continentCities = continentCitiy.split(/[ \/]|[\s]+|[:]/);
continent = continentCities[0].trim();
city = continentCities.length === 2 ? continentCities[1].trim() : continentCities[1].trim() + continentCities[2].trim();
}
if (ContinentsLoaded[continent] === void 0) {
console.log("timestamp loded: " + continent);
ContinentsLoaded[continent] = tz(new require("timezone/" + continent));
}
monthRaw = dateParsed["M"] || dateParsed["MMM"];
month = Timestamp.pad(monthRaw.toString(), 2);
day = Timestamp.pad(dateParsed["D"].toString(), 2);
minute = "00";
second = "00";
milliSecond = "000";
if (dateParsed["m"]) {
minute = Timestamp.pad(dateParsed["m"].toString(), 2);
}
hour = Timestamp.pad(dateParsed["H"].toString(), 2);
if (dateParsed["s"]) {
second = Timestamp.pad(dateParsed["s"].toString(), 2);
}
if (dateParsed["f"]) {
milliSecond = Timestamp.pad(dateParsed["f"].toString(), 3);
}
r = ContinentsLoaded[continent](dateParsed["Y"].toString() + "-" + month + "-" + day + "T" + hour + ":" + minute + ":" + second + "." + milliSecond, continent + "/" + city);
return r;
};
Timestamp.parseDate = parseDate;
Timestamp.parse = parse;
}).call(this);
//# sourceMappingURL=timestampGrabber.map | |
test_supvisorszmq.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2017 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import pytest
from supvisors.supvisorszmq import *
from supvisors.utils import DeferredRequestHeaders
from time import sleep
from unittest.mock import call, Mock
def test_internal_publish_subscribe(supvisors):
""" Test the ZeroMQ publish-subscribe sockets used internally in Supvisors. """
# create publisher and subscriber
publisher = InternalEventPublisher(supvisors.address_mapper.local_node_name,
supvisors.options.internal_port,
supvisors.logger)
subscriber = InternalEventSubscriber(supvisors.address_mapper.node_names,
supvisors.options.internal_port)
# check that the ZMQ sockets are ready
assert not publisher.socket.closed
assert not subscriber.socket.closed
# close the sockets
publisher.close()
subscriber.close()
# check that the ZMQ socket are closed
assert publisher.socket.closed
assert subscriber.socket.closed
def test_external_publish_subscribe(supvisors):
""" Test the ZeroMQ publish-subscribe sockets used in the event interface of Supvisors. """
# get event port
port = supvisors.options.event_port
# create publisher and subscriber
publisher = EventPublisher(port, supvisors.logger)
subscriber = EventSubscriber(zmq.Context.instance(), port, supvisors.logger)
# check that the ZMQ sockets are ready
assert not publisher.socket.closed
assert not subscriber.socket.closed
# close the sockets
publisher.close()
subscriber.close()
# check that the ZMQ socket are closed
assert publisher.socket.closed
assert subscriber.socket.closed
def test_internal_pusher_puller(supvisors):
""" Test the ZeroMQ push-pull sockets used internally in Supvisors. """
# create publisher and subscriber
pusher = RequestPusher(supvisors.logger)
puller = RequestPuller()
# check that the ZMQ sockets are ready
assert not pusher.socket.closed
assert not puller.socket.closed
# close the sockets
pusher.close()
puller.close()
# check that the ZMQ socket are closed
assert pusher.socket.closed
assert puller.socket.closed
@pytest.fixture
def internal_publisher(supvisors):
test_publisher = InternalEventPublisher(supvisors.address_mapper.local_node_name,
supvisors.options.internal_port,
supvisors.logger)
yield test_publisher
test_publisher.close()
sleep(0.5)
@pytest.fixture
def internal_subscriber(supvisors):
test_subscriber = InternalEventSubscriber(supvisors.address_mapper.node_names,
supvisors.options.internal_port)
test_subscriber.socket.setsockopt(zmq.RCVTIMEO, 1000)
# publisher does not wait for subscriber clients to work, so give some time for connections
sleep(0.5)
yield test_subscriber
test_subscriber.close()
sleep(0.5)
def internal_subscriber_receive(internal_subscriber):
""" This method performs a checked reception on the subscriber. """
internal_subscriber.socket.poll(1000)
return internal_subscriber.receive()
def test_disconnection(supvisors, internal_publisher, internal_subscriber):
""" Test the disconnection of subscribers. """
# get the local address
local_node_name = supvisors.address_mapper.local_node_name
# test remote disconnection
node_name = next(node_name for node_name in supvisors.address_mapper.node_names
if node_name != local_node_name)
internal_subscriber.disconnect([node_name])
# send a tick event from the local publisher
payload = {'date': 1000}
internal_publisher.send_tick_event(payload)
# check the reception of the tick event
msg = internal_subscriber_receive(internal_subscriber)
assert msg == (InternalEventHeaders.TICK.value, local_node_name, payload)
# test local disconnection
internal_subscriber.disconnect([local_node_name])
# send a tick event from the local publisher
internal_publisher.send_tick_event(payload)
# check the non-reception of the tick event
with pytest.raises(zmq.Again):
internal_subscriber.receive()
def test_tick_event(supvisors, internal_publisher, internal_subscriber):
""" Test the publication and subscription of the messages. """
# get the local address
local_node_name = supvisors.address_mapper.local_node_name
# send a tick event
payload = {'date': 1000}
internal_publisher.send_tick_event(payload)
# check the reception of the tick event
msg = internal_subscriber_receive(internal_subscriber)
assert msg == (InternalEventHeaders.TICK.value, local_node_name, payload)
def test_process_state_event(supvisors, internal_publisher, internal_subscriber):
""" Test the publication and subscription of the process state events. """
# get the local address
local_node_name = supvisors.address_mapper.local_node_name
# send a process event
payload = {'name': 'dummy_program', 'state': 'running'}
internal_publisher.send_process_state_event(payload)
# check the reception of the process event
msg = internal_subscriber_receive(internal_subscriber)
assert msg == (InternalEventHeaders.PROCESS.value, local_node_name, payload)
def test_process_added_event(supvisors, internal_publisher, internal_subscriber):
""" Test the publication and subscription of the process added events. """
# get the local address
local_node_name = supvisors.address_mapper.local_node_name
# send a process event
payload = {'name': 'dummy_program', 'state': 'running'}
internal_publisher.send_process_added_event(payload)
# check the reception of the process event
msg = internal_subscriber_receive(internal_subscriber)
assert msg == (InternalEventHeaders.PROCESS_ADDED.value, local_node_name, payload)
def test_process_removed_event(supvisors, internal_publisher, internal_subscriber):
""" Test the publication and subscription of the process removed events. """
# get the local address
local_node_name = supvisors.address_mapper.local_node_name
# send a process event
payload = {'name': 'dummy_program', 'state': 'running'}
internal_publisher.send_process_removed_event(payload)
# check the reception of the process event
msg = internal_subscriber_receive(internal_subscriber)
assert msg == (InternalEventHeaders.PROCESS_REMOVED.value, local_node_name, payload)
def test_statistics(supvisors, internal_publisher, internal_subscriber):
""" Test the publication and subscription of the statistics messages. """
# get the local address
local_node_name = supvisors.address_mapper.local_node_name
# send a statistics event
payload = {'cpu': 15, 'mem': 5, 'io': (1234, 4321)}
internal_publisher.send_statistics(payload)
# check the reception of the statistics event
msg = internal_subscriber_receive(internal_subscriber)
assert msg == (InternalEventHeaders.STATISTICS.value, local_node_name, payload)
def test_state_event(supvisors, internal_publisher, internal_subscriber):
""" Test the publication and subscription of the operational event. """
# get the local node
local_node_name = supvisors.address_mapper.local_node_name
# send a process event
payload = {'statecode': 10, 'statename': 'running'}
internal_publisher.send_state_event(payload)
# check the reception of the process event
msg = internal_subscriber_receive(internal_subscriber)
assert msg == (InternalEventHeaders.STATE.value, local_node_name, payload)
@pytest.fixture
def pusher(supvisors):
test_pusher = RequestPusher(supvisors.logger)
yield test_pusher
test_pusher.close()
sleep(0.5)
@pytest.fixture
def puller(supvisors):
test_puller = RequestPuller()
# socket configuration is meant to be blocking
# however, a failure would block the unit test, so a timeout is set for emission and reception
test_puller.socket.setsockopt(zmq.SNDTIMEO, 1000)
test_puller.socket.setsockopt(zmq.RCVTIMEO, 1000)
yield test_puller
test_puller.close()
sleep(0.5)
def test_check_node(mocker, pusher, puller):
""" The method tests that the 'Check Address' request is sent and received correctly. """
pusher.send_check_node('10.0.0.1')
request = puller.receive()
assert request == (DeferredRequestHeaders.CHECK_NODE.value, ('10.0.0.1',))
# test that the pusher socket is not blocking
mocker.patch.object(pusher.socket, 'send_pyobj', side_effect=zmq.error.Again)
pusher.send_check_node('10.0.0.1')
# test that absence of puller does not block the pusher or raise any exception
puller.close()
pusher.send_check_node('10.0.0.1')
def test_isolate_nodes(mocker, pusher, puller):
""" The method tests that the 'Isolate Nodes' request is sent and received correctly. """
pusher.send_isolate_nodes(['10.0.0.1', '10.0.0.2'])
request = puller.receive()
assert request == (DeferredRequestHeaders.ISOLATE_NODES.value, ('10.0.0.1', '10.0.0.2'))
# test that the pusher socket is not blocking
mocker.patch.object(pusher.socket, 'send_pyobj', side_effect=zmq.error.Again)
pusher.send_isolate_nodes(['10.0.0.1', '10.0.0.2'])
# test that absence of puller does not block the pusher or raise any exception
puller.close()
pusher.send_isolate_nodes(['10.0.0.1', '10.0.0.2'])
def test_start_process(mocker, pusher, puller):
""" The method tests that the 'Start Process' request is sent and received correctly. """
pusher.send_start_process('10.0.0.1', 'application:program', ['-extra', 'arguments'])
request = puller.receive()
assert request == (DeferredRequestHeaders.START_PROCESS.value,
('10.0.0.1', 'application:program', ['-extra', 'arguments']))
# test that the pusher socket is not blocking
mocker.patch.object(pusher.socket, 'send_pyobj', side_effect=zmq.error.Again)
pusher.send_start_process('10.0.0.1', 'application:program', ['-extra', 'arguments'])
# test that absence of puller does not block the pusher or raise any exception
puller.close()
pusher.send_start_process('10.0.0.1', 'application:program', ['-extra', 'arguments'])
def test_stop_process(mocker, pusher, puller):
""" The method tests that the 'Stop Process' request is sent and received correctly. """
pusher.send_stop_process('10.0.0.1', 'application:program')
request = puller.receive()
assert request == (DeferredRequestHeaders.STOP_PROCESS.value, ('10.0.0.1', 'application:program'))
# test that the pusher socket is not blocking
mocker.patch.object(pusher.socket, 'send_pyobj', side_effect=zmq.error.Again)
pusher.send_stop_process('10.0.0.1', 'application:program')
# test that absence of puller does not block the pusher or raise any exception
puller.close()
pusher.send_stop_process('10.0.0.1', 'application:program')
def test_restart(mocker, pusher, puller):
""" The method tests that the 'Restart' request is sent and received correctly. """
pusher.send_restart('10.0.0.1')
request = puller.receive()
assert request == (DeferredRequestHeaders.RESTART.value, ('10.0.0.1',))
# test that the pusher socket is not blocking
mocker.patch.object(pusher.socket, 'send_pyobj', side_effect=zmq.error.Again)
pusher.send_restart('10.0.0.1')
# test that absence of puller does not block the pusher or raise any exception
puller.close()
pusher.send_restart('10.0.0.1')
def test_shutdown(mocker, pusher, puller):
""" The method tests that the 'Shutdown' request is sent and received correctly. """
pusher.send_shutdown('10.0.0.1')
request = puller.receive()
assert request == (DeferredRequestHeaders.SHUTDOWN.value, ('10.0.0.1',))
# test that the pusher socket is not blocking
mocker.patch.object(pusher.socket, 'send_pyobj', side_effect=zmq.error.Again)
pusher.send_shutdown('10.0.0.1')
# test that absence of puller does not block the pusher or raise any exception
puller.close()
pusher.send_shutdown('10.0.0.1')
def test_restart_all(mocker, pusher, puller):
""" The method tests that the 'RestartAll' request is sent and received correctly. """
pusher.send_restart_all('10.0.0.1')
request = puller.receive()
assert request == (DeferredRequestHeaders.RESTART_ALL.value, ('10.0.0.1',))
# test that the pusher socket is not blocking
mocker.patch.object(pusher.socket, 'send_pyobj', side_effect=zmq.error.Again)
pusher.send_restart_all('10.0.0.1')
# test that absence of puller does not block the pusher or raise any exception
puller.close()
pusher.send_restart_all('10.0.0.1')
def test_shutdown_all(mocker, pusher, puller):
""" The method tests that the 'ShutdownAll' request is sent and received correctly. """
pusher.send_shutdown_all('10.0.0.1')
request = puller.receive()
assert request == (DeferredRequestHeaders.SHUTDOWN_ALL.value, ('10.0.0.1',))
# test that the pusher socket is not blocking
mocker.patch.object(pusher.socket, 'send_pyobj', side_effect=zmq.error.Again)
pusher.send_shutdown_all('10.0.0.1')
# test that absence of puller does not block the pusher or raise any exception
puller.close()
pusher.send_shutdown_all('10.0.0.1')
@pytest.fixture
def publisher(supvisors):
test_publisher = EventPublisher(supvisors.options.event_port, supvisors.logger)
yield test_publisher
test_publisher.close()
sleep(0.5)
@pytest.fixture
def subscriber(supvisors):
|
def check_reception(subscriber, header=None, data=None):
""" The method tests that the message is received correctly or not received at all. """
if header and data:
# check that subscriber receives the message
msg = subscriber.receive()
assert msg == (header, data)
else:
# check the non-reception of the Supvisors status
with pytest.raises(zmq.Again):
subscriber.receive()
def check_supvisors_status(subscriber, publisher, subscribed):
""" The method tests the emission and reception of a Supvisors status, depending on the subscription status. """
supvisors_payload = {'state': 'running', 'version': '1.0'}
publisher.send_supvisors_status(supvisors_payload)
if subscribed:
check_reception(subscriber, EventHeaders.SUPVISORS, supvisors_payload)
else:
check_reception(subscriber)
def check_address_status(subscriber, publisher, subscribed):
""" The method tests the emission and reception of an Address status, depending on the subscription status. """
address_payload = {'state': 'silent', 'name': 'cliche01', 'date': 1234}
publisher.send_address_status(address_payload)
if subscribed:
check_reception(subscriber, EventHeaders.ADDRESS, address_payload)
else:
check_reception(subscriber)
def check_application_status(subscriber, publisher, subscribed):
""" The method tests the emission and reception of an Application status, depending on the subscription status. """
application_payload = {'state': 'starting', 'name': 'supvisors'}
publisher.send_application_status(application_payload)
if subscribed:
check_reception(subscriber, EventHeaders.APPLICATION, application_payload)
else:
check_reception(subscriber)
def check_process_event(subscriber, publisher, subscribed):
""" The method tests the emission and reception of a Process status, depending on the subscription status. """
event_payload = {'state': 20, 'name': 'plugin', 'group': 'supvisors', 'now': 1230}
publisher.send_process_event('local_address', event_payload)
if subscribed:
event_payload['address'] = 'local_address'
check_reception(subscriber, EventHeaders.PROCESS_EVENT, event_payload)
else:
check_reception(subscriber)
def check_process_status(subscriber, publisher, subscribed):
""" The method tests the emission and reception of a Process status, depending on the subscription status. """
process_payload = {'state': 'running', 'process_name': 'plugin', 'application_name': 'supvisors', 'date': 1230}
publisher.send_process_status(process_payload)
if subscribed:
check_reception(subscriber, EventHeaders.PROCESS_STATUS, process_payload)
else:
check_reception(subscriber)
def check_subscription(subscriber, publisher, supvisors_subscribed, address_subscribed,
application_subscribed, event_subscribed, process_subscribed):
""" The method tests the emission and reception of all status, depending on their subscription status. """
sleep(1)
check_supvisors_status(subscriber, publisher, supvisors_subscribed)
check_address_status(subscriber, publisher, address_subscribed)
check_application_status(subscriber, publisher, application_subscribed)
check_process_event(subscriber, publisher, event_subscribed)
check_process_status(subscriber, publisher, process_subscribed)
def test_no_subscription(publisher, subscriber):
""" Test the non-reception of messages when subscription is not set. """
# at this stage, no subscription has been set so nothing should be received
check_subscription(subscriber, publisher, False, False, False, False, False)
def test_subscription_supvisors_status(publisher, subscriber):
""" Test the reception of Supvisors status messages when related subscription is set. """
# subscribe to Supvisors status only
subscriber.subscribe_supvisors_status()
check_subscription(subscriber, publisher, True, False, False, False, False)
# unsubscribe from Supvisors status
subscriber.unsubscribe_supvisors_status()
check_subscription(subscriber, publisher, False, False, False, False, False)
def test_subscription_address_status(publisher, subscriber):
""" Test the reception of Address status messages when related subscription is set. """
# subscribe to Address status only
subscriber.subscribe_address_status()
check_subscription(subscriber, publisher, False, True, False, False, False)
# unsubscribe from Address status
subscriber.unsubscribe_address_status()
check_subscription(subscriber, publisher, False, False, False, False, False)
def test_subscription_application_status(publisher, subscriber):
""" Test the reception of Application status messages when related subscription is set. """
# subscribe to Application status only
subscriber.subscribe_application_status()
check_subscription(subscriber, publisher, False, False, True, False, False)
# unsubscribe from Application status
subscriber.unsubscribe_application_status()
check_subscription(subscriber, publisher, False, False, False, False, False)
def test_subscription_process_event(publisher, subscriber):
""" Test the reception of Process event messages when related subscription is set. """
# subscribe to Process event only
subscriber.subscribe_process_event()
check_subscription(subscriber, publisher, False, False, False, True, False)
# unsubscribe from Process event
subscriber.unsubscribe_process_event()
check_subscription(subscriber, publisher, False, False, False, False, False)
def test_subscription_process_status(publisher, subscriber):
""" Test the reception of Process status messages when related subscription is set. """
# subscribe to Process status only
subscriber.subscribe_process_status()
check_subscription(subscriber, publisher, False, False, False, False, True)
# unsubscribe from Process status
subscriber.unsubscribe_process_status()
check_subscription(subscriber, publisher, False, False, False, False, False)
def test_subscription_all_status(publisher, subscriber):
""" Test the reception of all status messages when related subscription is set. """
# subscribe to every status
subscriber.subscribe_all()
check_subscription(subscriber, publisher, True, True, True, True, True)
# unsubscribe all
subscriber.unsubscribe_all()
check_subscription(subscriber, publisher, False, False, False, False, False)
def test_subscription_multiple_status(publisher, subscriber):
""" Test the reception of multiple status messages when related subscription is set. """
# subscribe to Application and Process Event
subscriber.subscribe_application_status()
subscriber.subscribe_process_event()
check_subscription(subscriber, publisher, False, False, True, True, False)
# set subscription to Address and Process Status
subscriber.unsubscribe_application_status()
subscriber.unsubscribe_process_event()
subscriber.subscribe_process_status()
subscriber.subscribe_address_status()
check_subscription(subscriber, publisher, False, True, False, False, True)
# add subscription to Supvisors Status
subscriber.subscribe_supvisors_status()
check_subscription(subscriber, publisher, True, True, False, False, True)
# unsubscribe all
subscriber.unsubscribe_supvisors_status()
subscriber.unsubscribe_address_status()
subscriber.unsubscribe_process_status()
check_subscription(subscriber, publisher, False, False, False, False, False)
def test_supervisor_creation_closure(supvisors):
    """ Test the attributes created in SupervisorZmq constructor. """
    sockets = SupervisorZmq(supvisors)
    # test all attribute types; each socket must be open right after creation
    assert isinstance(sockets.publisher, EventPublisher)
    assert not sockets.publisher.socket.closed
    assert isinstance(sockets.internal_publisher, InternalEventPublisher)
    assert not sockets.internal_publisher.socket.closed
    assert isinstance(sockets.pusher, RequestPusher)
    assert not sockets.pusher.socket.closed
    # close the instance
    sockets.close()
    # close() must close every underlying ZMQ socket
    assert sockets.publisher.socket.closed
    assert sockets.internal_publisher.socket.closed
    assert sockets.pusher.socket.closed
def test_supvisors_creation_closure(supvisors):
    """ Test the attributes created in SupvisorsZmq constructor. """
    sockets = SupvisorsZmq(supvisors)
    # test all attribute types; sockets must be open and registered in the poller
    assert isinstance(sockets.internal_subscriber, InternalEventSubscriber)
    assert not sockets.internal_subscriber.socket.closed
    assert isinstance(sockets.puller, RequestPuller)
    assert not sockets.puller.socket.closed
    assert sockets.puller.socket in sockets.poller._map
    assert sockets.internal_subscriber.socket in sockets.poller._map
    # close the instance
    sockets.close()
    # close() must unregister the sockets from the poller and close them
    assert sockets.poller._map == {}
    assert sockets.internal_subscriber.socket.closed
    assert sockets.puller.socket.closed
def test_poll(supvisors):
    """ Test the poll method of the SupvisorsZmq class. """
    sockets = SupvisorsZmq(supvisors)
    # with no pending message on any registered socket, poll returns an empty dict
    assert sockets.poll() == {}
def test_check_puller(mocker, supvisors):
    """ Test the check_puller method of the SupvisorsZmq class. """
    mocked_check = mocker.patch('supvisors.supvisorszmq.SupvisorsZmq.check_socket', return_value='checked')
    sockets = SupvisorsZmq(supvisors)
    param = Mock()
    # check_puller must delegate to check_socket with the puller socket
    assert sockets.check_puller(param) == 'checked'
    assert mocked_check.call_args_list == [call(sockets.puller, param)]
def test_check_subscriber(mocker, supvisors):
    """ Test the check_subscriber method of the SupvisorsZmq class. """
    mocked_check = mocker.patch('supvisors.supvisorszmq.SupvisorsZmq.check_socket', return_value='checked')
    sockets = SupvisorsZmq(supvisors)
    param = Mock()
    # check_subscriber must delegate to check_socket with the internal subscriber
    assert sockets.check_subscriber(param) == 'checked'
    assert mocked_check.call_args_list == [call(sockets.internal_subscriber, param)]
def test_check_socket(mocker, supvisors):
    """ Test the check_socket method of the SupvisorsZmq class. """
    mocker.patch('builtins.print')
    sockets = SupvisorsZmq(supvisors)
    # prepare context: a socket wrapper whose receive raises ZMQError
    mocked_sockets = Mock(socket='socket', **{'receive.side_effect': ZMQError})
    # test with empty poll result
    poll_result = {}
    # test with socket not in poll result: no reception attempted
    assert sockets.check_socket(mocked_sockets, poll_result) is None
    assert not mocked_sockets.receive.called
    # test with socket in poll result but with pollout tag
    poll_result = {'socket': zmq.POLLOUT}
    assert sockets.check_socket(mocked_sockets, poll_result) is None
    assert not mocked_sockets.receive.called
    # test with socket in poll result and with pollin tag
    # test exception: receive is attempted but the ZMQError is swallowed
    poll_result = {'socket': zmq.POLLIN}
    assert sockets.check_socket(mocked_sockets, poll_result) is None
    assert mocked_sockets.receive.called
    mocked_sockets.receive.reset_mock()
    # test with socket in poll result and with pollin tag
    # test normal behaviour: the received message is returned as-is
    mocked_sockets.receive.side_effect = None
    mocked_sockets.receive.return_value = 'message'
    assert sockets.check_socket(mocked_sockets, poll_result) == 'message'
    assert mocked_sockets.receive.called
def test_disconnect_subscriber(mocker, supvisors):
    """ Test the disconnect_subscriber method of the SupvisorsZmq class. """
    mocked_disconnect = mocker.patch('supvisors.supvisorszmq.InternalEventSubscriber.disconnect')
    sockets = SupvisorsZmq(supvisors)
    # test disconnect on unknown address: the call is forwarded unchanged
    sockets.disconnect_subscriber(['10.0.0.1'])
    assert mocked_disconnect.call_args_list == [call(['10.0.0.1'])]
| test_subscriber = EventSubscriber(zmq.Context.instance(), supvisors.options.event_port, supvisors.logger)
# WARN: this subscriber does not include a subscription
# when using a subscription, use a time sleep to give time to PyZMQ to handle it
# sleep(0.5)
# WARN: socket configuration is meant to be blocking
# however, a failure would block the unit test, so a timeout is set for reception
test_subscriber.socket.setsockopt(zmq.RCVTIMEO, 1000)
yield test_subscriber
test_subscriber.close()
sleep(0.5) |
setup.py | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
# azure_bdist_wheel customizes the bdist_wheel command; fall back gracefully
# when it is not installed so plain sdist/installs still work.
try:
    from azure_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")
    cmdclass = {}
# Package version, bumped on each release.
VERSION = "2.0.24"
# Trove classifiers: supported Python versions and license.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
    'adal>=0.4.7',
    'azure-cli-core',
]
# Long description is README followed by the changelog.
with open('README.rst', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()
setup(
    name='azure-cli-profile',
    version=VERSION,
    description='Microsoft Azure Command-Line Tools Profile Command Module',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    author='Microsoft Corporation',
    author_email='[email protected]',
    url='https://github.com/Azure/azure-cli',
    classifiers=CLASSIFIERS,
    packages=[
        'azure',
        'azure.cli',
        'azure.cli.command_modules',
        'azure.cli.command_modules.profile',
    ],
    install_requires=DEPENDENCIES,
    cmdclass=cmdclass
)
gitlab_test.go | package input
import (
"net/http"
"net/http/httptest"
"os"
"testing"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
func | (t *testing.T) {
viper.SetConfigName("testconfig")
viper.AddConfigPath(".")
err := viper.ReadInConfig()
if err != nil {
log.Panic(err)
}
file, e := os.Open("./test_data/gitlab.json")
if e != nil {
log.Fatal(e)
}
req, err := http.NewRequest("POST", "/", file)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("X-Gitlab-Event", "Push Hook")
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
var gitlabModule Module = &GitlabModule{}
c := make(chan IRCMessage, 10)
gitlabModule.Init(viper.Sub("modules.gitlab"), &c)
handler := http.HandlerFunc(gitlabModule.GetHandler())
handler.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
t.Errorf("Handler returned wrong status code: got %v wanted %v",
status, http.StatusOK)
}
}
| TestGitlabHandler |
tasks.py | from pathlib import Path
import shutil
from jinja2 import Template
from invoke import task
import jupytext
_TARGET = Path('~', 'dev', 'ploomber').expanduser()
@task
def setup(c, from_lock=False):
    """Create conda environment
    """
    if from_lock:
        # Reproduce the exact environment pinned in environment.yml.
        c.run('conda env create --file environment.yml --force')
    else:
        # Build from the dev spec, then refresh the lock file from the result.
        c.run('conda env create --file environment.dev.yml --force')
        c.run('conda env export --no-build --file environment.yml'
            ' --name ploomber-workshop')
@task
def convert(c):
    """Generate README.md and index.md. Convert index.md to index.ipynb
    """
    # Build the notebooks from their jupytext markdown sources (EN and ES).
    print('Generating index.ipynb...')
    nb = jupytext.read('index.md')
    jupytext.write(nb, 'index.ipynb')
    print('Generating index.es.ipynb...')
    nb = jupytext.read('index.es.md')
    jupytext.write(nb, 'index.es.ipynb')
    # (source, destination) pairs; a None destination keeps the source's
    # relative path under the target repository.
    files_to_copy = [
        ('_static/workshop.svg', None),
        ('_static/workshop.es.svg', None),
        ('README.md', 'workshop.md'),
        ('README.es.md', 'workshop.es.md'),
    ]
    for f, target in files_to_copy:
        if target is None:
            target = _TARGET / f
        else:
            target = _TARGET / target
        print(f'Copying {f} to {target}...')
        shutil.copy(f, target)
@task
def clear(c):
    """Clear outputs generated when running the code
    """
    # Recreate an empty playground/ directory from scratch.
    playground = Path('playground')
    if playground.exists():
        shutil.rmtree('playground')
    playground.mkdir(exist_ok=True)
    # Seed it with a fresh copy of the sample notebook.
    shutil.copy('_sample/nb.ipynb', 'playground/nb.ipynb')
Navigation.js | import React, { Component } from 'react';
class | extends Component {
render() {
return (
<nav>
<header><span className="title">Navigation</span></header>
<ul>
<li><a href="#">Home</a></li>
<li><a href="#">Catalog</a></li>
<li><a href="#">About</a></li>
<li><a href="#">Contact Us</a></li>
</ul>
</nav>
)
}
}
export default Navigation; | Navigation |
metadata_generator.rs | use crate::{BindgenArgType, ImplItemMethodInfo, InputStructType, SerializerType};
use quote::quote;
use syn::export::TokenStream2;
use syn::ReturnType;
impl ImplItemMethodInfo {
/// Generates metadata struct for this method.
///
/// # Example:
/// The following method:
/// ```ignore
/// fn f3(&mut self, arg0: FancyStruct, arg1: u64) -> Result<IsOk, Error> { }
/// ```
/// will produce this struct:
/// ```ignore
/// near_sdk::MethodMetadata {
/// name: "f3".to_string(),
/// is_view: false,
/// is_init: false,
/// args: {
/// #[derive(borsh::BorshSchema)]
/// #[derive(serde :: Deserialize, serde :: Serialize)]
/// struct Input {
/// arg0: FancyStruct,
/// arg1: u64,
/// }
/// Some(Input::schema_container())
/// },
/// callbacks: vec![],
/// callbacks_vec: None,
/// result: Some(Result < IsOk, Error > ::schema_container())
/// }
/// ```
/// If args are serialized with Borsh it will not include `#[derive(borsh::BorshSchema)]`.
pub fn metadata_struct(&self) -> TokenStream2 {
let method_name_str = self.attr_signature_info.ident.to_string();
let is_view = match &self.attr_signature_info.receiver {
None => true,
Some(rec) => rec.mutability.is_none(),
};
let is_init = self.attr_signature_info.is_init;
let args = if self.attr_signature_info.input_args().next().is_some() {
let input_struct =
self.attr_signature_info.input_struct(InputStructType::Deserialization);
// If input args are JSON then we need to additionally specify schema for them.
let additional_schema = match &self.attr_signature_info.input_serializer {
SerializerType::Borsh => TokenStream2::new(),
SerializerType::JSON => quote! {
#[derive(borsh::BorshSchema)]
},
};
quote! { | #input_struct
Some(Input::schema_container())
}
}
} else {
quote! {
None
}
};
let callbacks: Vec<_> = self
.attr_signature_info
.args
.iter()
.filter(|arg| match arg.bindgen_ty {
BindgenArgType::CallbackArg => true,
_ => false,
})
.map(|arg| {
let ty = &arg.ty;
quote! {
#ty::schema_container()
}
})
.collect();
let callbacks_vec = match self
.attr_signature_info
.args
.iter()
.filter(|arg| match arg.bindgen_ty {
BindgenArgType::CallbackArgVec => true,
_ => false,
})
.last()
{
None => {
quote! {
None
}
}
Some(arg) => {
let ty = &arg.ty;
quote! {
Some(#ty::schema_container())
}
}
};
let result = match &self.attr_signature_info.returns {
ReturnType::Default => {
quote! {
None
}
}
ReturnType::Type(_, ty) => {
quote! {
Some(#ty::schema_container())
}
}
};
quote! {
near_sdk::MethodMetadata {
name: #method_name_str.to_string(),
is_view: #is_view,
is_init: #is_init,
args: #args,
callbacks: vec![#(#callbacks),*],
callbacks_vec: #callbacks_vec,
result: #result
}
}
}
} | {
#additional_schema |
themes.js | import { NightShiftDark } from './themes/night-shift-dark'
import { NightShiftLight } from './themes/night-shift-light'
import { MacOSDark } from './themes/mac-os-dark'
import { MacOSLight } from './themes/mac-os-light'
import { OneDark } from './themes/one-dark'
import { OneLight } from './themes/one-light'
import { GruvboxLight } from './themes/gruvbox-light'
import { GruvboxDark } from './themes/gruvbox-dark'
import { RoseboxDark } from './themes/rosebox-dark'
import { Dracula } from './themes/dracula'
import { Nord } from './themes/nord'
import { Amarena } from './themes/amarena'
import { Soil } from './themes/soil-dark'
// Central registry of all selectable UI themes, keyed by theme name.
// Object shorthand keeps each key identical to its imported export name.
export const Themes = {
  NightShiftDark,
  NightShiftLight,
  MacOSDark,
  MacOSLight,
  OneDark,
  OneLight,
  GruvboxLight,
  GruvboxDark,
  RoseboxDark,
  Soil,
  Dracula,
  Nord,
  Amarena
}
|
test_invenio_circulation.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 CERN.
# Copyright (C) 2018-2019 RERO.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
| """Test version import."""
from invenio_circulation import __version__
assert __version__ |
def test_version(): |
urls.py | from django.urls import path
from .views import (
UserSelfView,
)
# Route table for the users API; "users/self/profile/" exposes the
# authenticated user's own profile via UserSelfView.
urlpatterns = [
    path("users/self/profile/", UserSelfView.as_view(), name="user_self"),
]
|
flip-columns-for-maximum-number-of-equal-rows.py | class Solution:
def maxEqualRowsAfterFlips(self, matrix: List[List[int]]) -> int:
pattern = collections.defaultdict(int)
for row in matrix:
pattern[tuple(row)] += 1 | pattern[tuple(1 - c for c in row)] += 1
return max(pattern.values()) |
|
mod.rs | use crate::db;
use regex::Regex;
use serde::{Deserialize, Serialize};
use mongodb::bson::doc;
const GROCERY_COLLECTION_NAME : &str = "groceries";
const GROCERY_HELP : &str = "Grocery allowed commands:
list [category]
add {category}
product1
product2
product3
...
rem {product_id}";
const MAX_ITEMS_IN_DB : u32 = 10000;
/// A single grocery item as stored in the MongoDB `groceries` collection.
#[derive(Debug, Serialize, Deserialize)]
struct Groceries {
    // Category heading the product is listed under.
    category: String,
    // Numeric id (unique via index) referenced by the `rem` command.
    groid: u32,
    // Product name exactly as entered by the user.
    product: String,
}
/// Dispatches a grocery chat command (`list`, `add`, `rem`), lazily creating
/// the collection and its `groid` index on first use. Any failure is
/// reported back to the user as the returned message string.
pub async fn handle_grocery_command(cmd: String, db: Box<db::Homechatbotdb>) -> String {
    // Ensure the backing collection exists, creating it on first use.
    match db.check_collection_exists(GROCERY_COLLECTION_NAME).await {
        Ok(exists) => {
            if !exists {
                match db.create_collection(GROCERY_COLLECTION_NAME).await {
                    Ok(_) => {},
                    Err(e) => return String::from(format!("{}", e)),
                };
            }
        },
        Err(e) => return String::from(format!("{}", e)),
    };
    // Ensure an index on `groid` exists so inserts can rely on uniqueness.
    match db.get_collection_index(GROCERY_COLLECTION_NAME).await {
        Ok(indxs) => {
            let mut groid_inside = false;
            for indx in indxs {
                if indx.starts_with("groid") {
                    groid_inside = true;
                    break;
                }
            }
            if !groid_inside {
                println!("Setting grocery ID as index");
                match db.create_collection_index(GROCERY_COLLECTION_NAME, "groid").await {
                    Ok(_) => {},
                    Err(e) => return String::from(format!("{}", e)),
                };
            }
        },
        Err(e) => return String::from(format!("{}", e)),
    };
    // Split the input into a command word and an optional remainder
    // ((?s) lets `.` match newlines for multi-line `add` bodies).
    let re = match Regex::new(r"^(?s)(\w+)(?:\s+(.*))?$") {
        Ok(r) => r,
        Err(e) => return String::from(format!("ERROR: {}", e)),
    };
    let caps = match re.captures(cmd.as_str()) {
        Some(c) => c,
        None => return String::from(GROCERY_HELP),
    };
    let cmd = match caps.get(1) {
        Some(c) => c.as_str().to_lowercase(),
        None => return String::from(GROCERY_HELP),
    };
    if cmd == "list" {
        // Optional second capture is a category filter.
        let fltr = match caps.get(2) {
            Some(c) => vec![c.as_str()],
            None => vec![],
        };
        return handle_list_request(fltr, db).await;
    } else if cmd == "add" {
        match caps.get(2) {
            Some(c) => return handle_add_request(c.as_str(), db).await,
            None => return String::from(GROCERY_HELP),
        };
    } else if cmd == "rem" {
        match caps.get(2) {
            Some(c) => return handle_remove_request(c.as_str(), db).await,
            None => return String::from(GROCERY_HELP),
        };
    }
    // Unknown command word: show usage.
    return String::from(GROCERY_HELP);
}
/// Handles `add`: the first line of `cmd_rest` is the category, every
/// following non-empty line is inserted as one product document.
async fn handle_add_request(cmd_rest: &str, db: Box<db::Homechatbotdb>) -> String {
    // Split "<category>\n<products...>"; (?s) makes `.` span newlines.
    let re = match Regex::new(r"^(?s)(.*?)\n(.*)$") {
        Ok(r) => r,
        Err(e) => return String::from(format!("ERROR: {}", e)),
    };
    let caps = match re.captures(cmd_rest) {
        Some(c) => c,
        None => return String::from(GROCERY_HELP),
    };
    let category = match caps.get(1) {
        Some(c) => c.as_str(),
        None => return String::from(GROCERY_HELP),
    };
    let products = match caps.get(2) {
        Some(p) => p.as_str(),
        None => return String::from(GROCERY_HELP),
    };
    let prodarr = products.split("\n");
    for sprod in prodarr {
        // Skip blank product lines.
        if sprod.trim() == "" {
            continue;
        }
        // Retry id allocation: a concurrent writer may take the same id
        // between lookup and insert; the duplicate-key error (E11000) from
        // the unique index triggers another attempt with a fresh id.
        let mut success = false;
        while !success {
            let id = match get_smallest_available_id(db.clone()).await {
                Ok(i) => i,
                Err(e) => return String::from(format!("ERROR: {}", e)),
            };
            match db.insert_data_to_collection(GROCERY_COLLECTION_NAME, vec![doc! {"product": sprod, "category": category, "groid": id}]).await {
                Ok(_) => {
                    success = true;
                },
                Err(e) => {
                    let err = format!("{}", e);
                    if err.contains("E11000 duplicate key error collection") {
                        continue;
                    } else {
                        return err;
                    }
                },
            };
        }
    }
    return String::from("Items successfully added!");
}
/// Returns the smallest id in `1..MAX_ITEMS_IN_DB` not yet used by any
/// grocery item, or an error when the DB read fails or the collection is
/// full.
async fn get_smallest_available_id(db: Box<db::Homechatbotdb>) -> Result<u32, String> {
    use std::collections::HashSet;
    let items = match db.get_generic_data_collection::<Groceries>(GROCERY_COLLECTION_NAME, doc!{}, doc!{}).await {
        Ok(i) => i,
        Err(e) => return Err(format!("{}", e)),
    };
    // Collect used ids into a set so the scan below is O(n) instead of the
    // quadratic Vec::contains loop.
    let used: HashSet<u32> = items.iter().map(|pro| pro.groid).collect();
    for n in 1..MAX_ITEMS_IN_DB {
        if !used.contains(&n) {
            return Ok(n);
        }
    }
    Err(format!("Too many products in the database"))
}
/// Handles `list`: renders the grocery list grouped by category,
/// optionally filtered to the single category in `spec_cat`.
async fn handle_list_request(spec_cat: Vec<&str>, db: Box<db::Homechatbotdb>) -> String {
    // Optional category filter: first element of `spec_cat` when present.
    let fltr = if spec_cat.len() > 0 {
        doc!{"category": spec_cat[0]}
    } else {
        doc!{}
    };
    // Sort by category so consecutive items share one heading.
    let items = match db.get_generic_data_collection::<Groceries>(GROCERY_COLLECTION_NAME, fltr, doc!{"category":1}).await {
        Ok(i) => i,
        Err(e) => return format!("Error getting groceries: {}", e).to_string(),
    };
    let mut msg : String = if items.len() > 0 {
        "".to_string()
    } else {
        "List is empty".to_string()
    };
    let mut prev_cat : String = "".to_string();
    for pro in items {
        // Emit a heading whenever the category changes.
        if pro.category != prev_cat {
            msg = format!("{}{}:\n", msg, pro.category);
            prev_cat = pro.category;
        }
        // Each line shows the id used by `rem`, then the product name.
        msg = format!("{}({}) {}\n", msg, pro.groid, pro.product);
    }
    return msg;
}
async fn handle_remove_request(cmd_rest: &str, db: Box<db::Homechatbotdb>) -> String {
let items = cmd_rest.split(",");
for itm in items {
let id = match itm.parse::<u32>() {
Ok(i) => i,
Err(e) => return format!("Only numbers are allowed: {}\n{}", e, GROCERY_HELP).to_string(),
};
match db.remove_data(GROCERY_COLLECTION_NAME, doc!{"groid":id}).await {
Ok(_) => {},
Err(e) => return format!("{}", e),
};
}
return "Items successfully removed".to_string();
} | |
keyring.rs | //! ECDSA key ring.
use crate::{Error, KeyHandle, LoadPkcs8, Result};
#[allow(unused_imports)]
use ecdsa::elliptic_curve::AlgorithmParameters;
#[cfg(feature = "nistp256")]
use super::nistp256;
#[cfg(feature = "secp256k1")]
use super::secp256k1;
/// ECDSA key ring.
///
/// Holds one sub-keyring per supported curve; each is gated behind its
/// Cargo feature.
#[derive(Debug, Default)]
pub struct KeyRing {
    /// ECDSA/P-256 keys.
    #[cfg(feature = "nistp256")]
    #[cfg_attr(docsrs, doc(cfg(feature = "nistp256")))]
    pub nistp256: nistp256::KeyRing,
    /// ECDSA/secp256k1 keys.
    #[cfg(feature = "secp256k1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "secp256k1")))]
    pub secp256k1: secp256k1::KeyRing,
}
impl LoadPkcs8 for KeyRing {
fn load_pkcs8(&mut self, private_key: pkcs8::PrivateKeyInfo<'_>) -> Result<KeyHandle> |
}
| {
if private_key.algorithm.oid != ecdsa::elliptic_curve::ALGORITHM_OID {
return Err(Error::AlgorithmInvalid);
}
match private_key.algorithm.parameters_oid()? {
#[cfg(feature = "nistp256")]
p256::NistP256::OID => self.nistp256.load_pkcs8(private_key),
#[cfg(feature = "secp256k1")]
k256::Secp256k1::OID => self.secp256k1.load_pkcs8(private_key),
_ => Err(Error::AlgorithmInvalid),
}
} |
config.go | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software |
// Package memorylimiter provides a processor for OpenTelemetry Service pipeline
// that drops data on the pipeline according to the current state of memory
// usage.
package memorylimiter
import (
"time"
"go.opentelemetry.io/collector/config/configmodels"
)
// Config defines configuration for the memoryLimiter processor.
type Config struct {
	configmodels.ProcessorSettings `mapstructure:",squash"`
	// CheckInterval is the time between measurements of memory usage for the
	// purposes of avoiding going over the limits. Defaults to zero, so no
	// checks will be performed.
	CheckInterval time.Duration `mapstructure:"check_interval"`
	// MemoryLimitMiB is the maximum amount of memory, in MiB, targeted to be
	// allocated by the process.
	MemoryLimitMiB uint32 `mapstructure:"limit_mib"`
	// MemorySpikeLimitMiB is the maximum, in MiB, spike expected between the
	// measurements of memory usage.
	MemorySpikeLimitMiB uint32 `mapstructure:"spike_limit_mib"`
	// BallastSizeMiB is the size, in MiB, of the ballast size being used by the
	// process.
	BallastSizeMiB uint32 `mapstructure:"ballast_size_mib"`
	// MemoryLimitPercentage is the maximum amount of memory, in %, targeted to be
	// allocated by the process. The fixed memory setting MemoryLimitMiB takes precedence.
	MemoryLimitPercentage uint32 `mapstructure:"limit_percentage"`
	// MemorySpikePercentage is the maximum, in percents against the total memory,
	// spike expected between the measurements of memory usage.
	MemorySpikePercentage uint32 `mapstructure:"spike_limit_percentage"`
}
// ballastSizeMibKey is the config key name of the BallastSizeMiB option.
const ballastSizeMibKey = "ballast_size_mib"
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License. |
airbyte_protocol.py | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
# generated by datamodel-codegen:
# filename: airbyte_protocol.yaml
from __future__ import annotations
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from pydantic import AnyUrl, BaseModel, Extra, Field
class Type(Enum):
RECORD = "RECORD"
STATE = "STATE"
LOG = "LOG"
SPEC = "SPEC"
CONNECTION_STATUS = "CONNECTION_STATUS"
CATALOG = "CATALOG"
class AirbyteRecordMessage(BaseModel):
class Config:
extra = Extra.allow
stream: str = Field(..., description="the name of this record's stream")
data: Dict[str, Any] = Field(..., description="the record data")
emitted_at: int = Field(
...,
description="when the data was emitted from the source. epoch in millisecond.",
)
namespace: Optional[str] = Field(None, description="the namespace of this record's stream")
class AirbyteStateMessage(BaseModel):
class Config:
extra = Extra.allow
data: Dict[str, Any] = Field(..., description="the state data")
class Level(Enum):
FATAL = "FATAL"
ERROR = "ERROR"
WARN = "WARN"
INFO = "INFO"
DEBUG = "DEBUG"
TRACE = "TRACE"
class AirbyteLogMessage(BaseModel):
class Config:
extra = Extra.allow
level: Level = Field(..., description="the type of logging")
message: str = Field(..., description="the log message")
class Status(Enum):
SUCCEEDED = "SUCCEEDED"
FAILED = "FAILED"
class AirbyteConnectionStatus(BaseModel):
class Config:
extra = Extra.allow
status: Status
message: Optional[str] = None
class SyncMode(Enum):
full_refresh = "full_refresh"
incremental = "incremental"
class DestinationSyncMode(Enum):
append = "append"
overwrite = "overwrite"
append_dedup = "append_dedup"
class OAuth2Specification(BaseModel):
class Config:
extra = Extra.allow
rootObject: Optional[List[Union[str, int]]] = Field(
None,
description="A list of strings representing a pointer to the root object which contains any oauth parameters in the ConnectorSpecification.\nExamples:\nif oauth parameters were contained inside the top level, rootObject=[] If they were nested inside another object {'credentials': {'app_id' etc...}, rootObject=['credentials'] If they were inside a oneOf {'switch': {oneOf: [{client_id...}, {non_oauth_param]}}, rootObject=['switch', 0] ",
)
oauthFlowInitParameters: Optional[List[List[str]]] = Field(
None,
description="Pointers to the fields in the rootObject needed to obtain the initial refresh/access tokens for the OAuth flow. Each inner array represents the path in the rootObject of the referenced field. For example. Assume the rootObject contains params 'app_secret', 'app_id' which are needed to get the initial refresh token. If they are not nested in the rootObject, then the array would look like this [['app_secret'], ['app_id']] If they are nested inside an object called 'auth_params' then this array would be [['auth_params', 'app_secret'], ['auth_params', 'app_id']]",
)
oauthFlowOutputParameters: Optional[List[List[str]]] = Field(
None,
description="Pointers to the fields in the rootObject which can be populated from successfully completing the oauth flow using the init parameters. This is typically a refresh/access token. Each inner array represents the path in the rootObject of the referenced field.",
)
class AuthType(Enum):
oauth2_0 = "oauth2.0"
class AuthSpecification(BaseModel):
auth_type: Optional[AuthType] = None
oauth2Specification: Optional[OAuth2Specification] = Field(
None,
description="If the connector supports OAuth, this field should be non-null.",
)
class AuthFlowType(Enum):
oauth2_0 = "oauth2.0"
oauth1_0 = "oauth1.0"
class OAuthConfigSpecification(BaseModel):
oauth_user_input_from_connector_config_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations used as input to OAuth.\nMust be a valid non-nested JSON that refers to properties from ConnectorSpecification.connectionSpecification\nusing special annotation 'path_in_connector_config'.\nThese are input values the user is entering through the UI to authenticate to the connector, that might also shared\nas inputs for syncing data via the connector.\n\nExamples:\n\nif no connector values is shared during oauth flow, oauth_user_input_from_connector_config_specification=[]\nif connector values such as 'app_id' inside the top level are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['app_id']\n }\n }\nif connector values such as 'info.app_id' nested inside another object are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['info', 'app_id']\n }\n }",
)
complete_oauth_output_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations produced by the OAuth flows as they are\nreturned by the distant OAuth APIs.\nMust be a valid JSON describing the fields to merge back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_output_specification={\n refresh_token: {\n type: string,\n path_in_connector_config: ['credentials', 'refresh_token']\n }\n }",
)
complete_oauth_server_input_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations.\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nserver when completing an OAuth flow (typically exchanging an auth code for refresh token).\n\nExamples:\n\n complete_oauth_server_input_specification={\n client_id: {\n type: string\n },\n client_secret: {\n type: string\n }\n }",
)
complete_oauth_server_output_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations that\nalso need to be merged back into the connector configuration at runtime.\nThis is a subset configuration of `complete_oauth_server_input_specification` that filters fields out to retain only the ones that\nare necessary for the connector to function with OAuth. (some fields could be used during oauth flows but not needed afterwards, therefore\nthey would be listed in the `complete_oauth_server_input_specification` but not `complete_oauth_server_output_specification`)\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nconnector when using OAuth flow APIs.\nThese fields are to be merged back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_server_output_specification={\n client_id: {\n type: string,\n path_in_connector_config: ['credentials', 'client_id']\n },\n client_secret: {\n type: string,\n path_in_connector_config: ['credentials', 'client_secret']\n }\n }",
)
class AirbyteStream(BaseModel):
class Config:
extra = Extra.allow
name: str = Field(..., description="Stream's name.")
json_schema: Dict[str, Any] = Field(..., description="Stream schema using Json Schema specs.")
supported_sync_modes: Optional[List[SyncMode]] = None
source_defined_cursor: Optional[bool] = Field(
None,
description="If the source defines the cursor field, then any other cursor field inputs will be ignored. If it does not, either the user_provided one is used, or the default one is used as a backup.",
)
default_cursor_field: Optional[List[str]] = Field(
None,
description="Path to the field that will be used to determine if a record is new or modified since the last sync. If not provided by the source, the end user will have to specify the comparable themselves.",
)
source_defined_primary_key: Optional[List[List[str]]] = Field(
None,
description="If the source defines the primary key, paths to the fields that will be used as a primary key. If not provided by the source, the end user will have to specify the primary key themselves.",
)
namespace: Optional[str] = Field(
None,
description="Optional Source-defined namespace. Currently only used by JDBC destinations to determine what schema to write to. Airbyte streams from the same sources should have the same namespace.",
)
class ConfiguredAirbyteStream(BaseModel):
    class Config:
        # Allow undeclared fields for protocol forward-compatibility.
        extra = Extra.allow
stream: AirbyteStream
sync_mode: SyncMode
cursor_field: Optional[List[str]] = Field(
None,
description="Path to the field that will be used to determine if a record is new or modified since the last sync. This field is REQUIRED if `sync_mode` is `incremental`. Otherwise it is ignored.",
)
destination_sync_mode: DestinationSyncMode
primary_key: Optional[List[List[str]]] = Field(
None,
description="Paths to the fields that will be used as primary key. This field is REQUIRED if `destination_sync_mode` is `*_dedup`. Otherwise it is ignored.",
)
class AdvancedAuth(BaseModel):
auth_flow_type: Optional[AuthFlowType] = None
predicate_key: Optional[List[str]] = Field(
None,
description="Json Path to a field in the connectorSpecification that should exist for the advanced auth to be applicable.",
)
predicate_value: Optional[str] = Field(
None,
description="Value of the predicate_key fields for the advanced auth to be applicable.",
)
oauth_config_specification: Optional[OAuthConfigSpecification] = None
class ConnectorSpecification(BaseModel):
class Config:
extra = Extra.allow
documentationUrl: Optional[AnyUrl] = None
changelogUrl: Optional[AnyUrl] = None
connectionSpecification: Dict[str, Any] = Field(
...,
description="ConnectorDefinition specific blob. Must be a valid JSON string.",
)
supportsIncremental: Optional[bool] = Field(None, description="If the connector supports incremental mode or not.")
supportsNormalization: Optional[bool] = Field(False, description="If the connector supports normalization or not.")
supportsDBT: Optional[bool] = Field(False, description="If the connector supports DBT or not.")
supported_destination_sync_modes: Optional[List[DestinationSyncMode]] = Field(
None, description="List of destination sync modes supported by the connector"
)
authSpecification: Optional[AuthSpecification] = Field(None, description="deprecated, switching to advanced_auth instead")
advanced_auth: Optional[AdvancedAuth] = Field(
None,
description="Additional and optional specification object to describe what an 'advanced' Auth flow would need to function.\n - A connector should be able to fully function with the configuration as described by the ConnectorSpecification in a 'basic' mode.\n - The 'advanced' mode provides easier UX for the user with UI improvements and automations. However, this requires further setup on the\n server side by instance or workspace admins beforehand. The trade-off is that the user does not have to provide as many technical\n inputs anymore and the auth process is faster and easier to complete.",
)
# Catalog of all streams a source can produce.
class AirbyteCatalog(BaseModel):
    class Config:
        extra = Extra.allow
    streams: List[AirbyteStream]
# Catalog restricted to the streams the user actually configured for a sync.
class ConfiguredAirbyteCatalog(BaseModel):
    class Config:
        extra = Extra.allow
    streams: List[ConfiguredAirbyteStream]
# Envelope for every message a connector emits; `type` selects which of the
# optional payload fields below is populated.
class AirbyteMessage(BaseModel):
    class Config:
        extra = Extra.allow
    type: Type = Field(..., description="Message type")
    log: Optional[AirbyteLogMessage] = Field(
        None,
        description="log message: any kind of logging you want the platform to know about.",
    )
    spec: Optional[ConnectorSpecification] = None
    connectionStatus: Optional[AirbyteConnectionStatus] = None
    catalog: Optional[AirbyteCatalog] = Field(None, description="catalog message: the catalog")
    record: Optional[AirbyteRecordMessage] = Field(None, description="record message: the record")
    state: Optional[AirbyteStateMessage] = Field(
        None,
        description="schema message: the state. Must be the last message produced. The platform uses this information",
    )
# Top-level container pairing a protocol message with a configured catalog.
# NOTE(review): unlike the sibling envelope models above, this one declares no
# `class Config` with `extra = Extra.allow` — confirm that is intentional.
class AirbyteProtocol(BaseModel):
    airbyte_message: Optional[AirbyteMessage] = None
    configured_airbyte_catalog: Optional[ConfiguredAirbyteCatalog] = None
| extra = Extra.allow |
networks_graphx_refine_no_img_encoder.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import torchvision.models
import os
import utils.network_utils
from utils.pointnet2_utils import PointNetSetAbstraction,PointNetFeaturePropagation
import cuda.emd.emd_module as emd
# Set the path for pretrain weight
os.environ['TORCH_HOME'] = '/media/caig/FECA2C89CA2C406F/sketch3D/pretrain_models'
Conv = nn.Conv2d
def wrapper(func, *args, **kwargs):
    """Lift a plain callable into an ``nn.Module``.

    The returned module's ``forward(input)`` calls
    ``func(input, *args, **kwargs)``, with the extra positional and keyword
    arguments captured here by closure.
    """
    class _FuncModule(nn.Module):
        def __init__(self):
            super().__init__()
            self.func = func

        def forward(self, input):
            return self.func(input, *args, **kwargs)

    return _FuncModule()
class TransformPC(nn.Module):
    """
    Transform point cloud to camera coordinate
    Input:
        xyz: float tensor, (BS,N_PTS,3); input point cloud
             values assumed to be in (-1,1)
        az: float tensor, (BS); azimuthal angle of camera in radians
        el: float tensor, (BS); elevation of camera in radians
    Output:
        xyz_out: float tensor, (BS,N_PTS,3); output point cloud in camera
                 co-ordinates
    """
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        # Number of points per cloud, fixed by config.
        self.n_pts = cfg.CONST.NUM_POINTS

    def forward(self, xyz, az, el):
        batch_size = xyz.size(0)
        cam_xyz = self.world2cam(xyz, az, el, batch_size, N_PTS=self.n_pts)
        return cam_xyz

    def world2cam(self, xyz, az, el, batch_size, N_PTS=1024):
        # Azimuth rotation (about the vertical axis): y ---> x
        rotmat_az=[
            [torch.cos(az),torch.sin(az),torch.zeros_like(az)],
            [-torch.sin(az),torch.cos(az),torch.zeros_like(az)],
            [torch.zeros_like(az),torch.zeros_like(az), torch.ones_like(az)]
        ]
        rotmat_az = [ torch.stack(x) for x in rotmat_az ]
        # Elevation rotation: z ---> x. In the dataloader, az = original az - 90
        # degrees, which means here it is actually x ----> -z.
        rotmat_el=[
            [torch.cos(el),torch.zeros_like(az), torch.sin(el)],
            [torch.zeros_like(az),torch.ones_like(az),torch.zeros_like(az)],
            [-torch.sin(el),torch.zeros_like(az), torch.cos(el)]
        ]
        rotmat_el = [ torch.stack(x) for x in rotmat_el ]
        # Lists of rows -> [3,3,B] tensors, then move batch first.
        rotmat_az = torch.stack(rotmat_az, 0)  # [3,3,B]
        rotmat_el = torch.stack(rotmat_el, 0)  # [3,3,B]
        rotmat_az = rotmat_az.permute(2, 0, 1)  # [B,3,3]
        rotmat_el = rotmat_el.permute(2, 0, 1)  # [B,3,3]
        # Compose: apply azimuth first, then elevation.
        rotmat = torch.matmul(rotmat_el, rotmat_az)

        # Transformation(t)
        # Distance of object from camera - fixed to 2
        d = 2.
        # Calculate translation params (camera offset along the view axis only).
        tx, ty, tz = [0, 0, d]
        tr_mat = torch.unsqueeze(torch.tensor([tx, ty, tz]), 0).repeat(batch_size, 1)  # [B,3]
        tr_mat = torch.unsqueeze(tr_mat,2)  # [B,3,1]
        tr_mat = tr_mat.permute(0, 2, 1)  # [B,1,3]
        tr_mat = tr_mat.repeat(1, N_PTS, 1)  # [B,N_PTS,3]
        tr_mat = utils.network_utils.var_or_cuda(tr_mat)  # [B,N_PTS,3]

        # Rotate the cloud, then subtract the translation; return as [B,N_PTS,3].
        xyz_out = torch.matmul(rotmat, xyz.permute(0, 2, 1)) - tr_mat.permute(0, 2, 1)
        return xyz_out.permute(0, 2, 1)
class FeatureProjection(nn.Module):
    """
    Project the pointcloud to 2d image and get the corresponding image features at
    the project location
    Input:
        img_feats: multi-scale image features
        pc: input point clouds (in camera coordinate) [B, N, 3]
    Output:
        pc_feats_trans: pointcloud xyz + multi-view image features (by feature ptojection)
    """
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        # Concatenates the per-scale feature samples along the channel axis.
        self.concat = wrapper(torch.cat, dim=-1)

    def forward(self, img_feats, pc):
        pc_feats = []
        # Sample every feature map at the points' projected image locations.
        pc_feats += [self.get_projection(img_feat, pc) for img_feat in img_feats]
        pc_feats_trans = self.concat(pc_feats)
        return pc_feats_trans

    def _project(self, img_feats, xs, ys):
        # Bilinearly interpolate img_feats at fractional pixel coords (xs, ys).
        x, y = xs.flatten(), ys.flatten()
        # Batch index for each flattened sample point.
        idb = torch.arange(img_feats.shape[0], device=img_feats.device)
        idb = idb[None].repeat(xs.shape[1], 1).t().flatten().long()

        # Four integer neighbours around each sample location.
        x1, y1 = torch.floor(x), torch.floor(y)
        x2, y2 = torch.ceil(x), torch.ceil(y)
        q11 = img_feats[idb, :, x1.long(), y1.long()].to(img_feats.device)
        q12 = img_feats[idb, :, x1.long(), y2.long()].to(img_feats.device)
        q21 = img_feats[idb, :, x2.long(), y1.long()].to(img_feats.device)
        q22 = img_feats[idb, :, x2.long(), y2.long()].to(img_feats.device)

        # Bilinear weights: each corner is scaled by the area of the opposite
        # sub-rectangle. (Advanced indexing above copies, so *= is safe.)
        weights = ((x2 - x) * (y2 - y)).unsqueeze(1)
        q11 *= weights

        weights = ((x - x1) * (y2 - y)).unsqueeze(1)
        q21 *= weights

        weights = ((x2 - x) * (y - y1)).unsqueeze(1)
        q12 *= weights

        weights = ((x - x1) * (y - y1)).unsqueeze(1)
        q22 *= weights
        out = q11 + q12 + q21 + q22
        return out.view(img_feats.shape[0], -1, img_feats.shape[1])

    def get_projection(self, img_feat, pc):
        _, _, h_, w_ = tuple(img_feat.shape)
        X, Y, Z = pc[..., 0], pc[..., 1], pc[..., 2]
        # Pinhole projection. The constants (focal 420, principal point 111.5,
        # clamp to 223) appear to assume a 224x224 source image — TODO confirm.
        w = (420.*X/abs(Z) + (111.5))
        h = (420.*Y/abs(Z) + (111.5))
        w = torch.clamp(w, 0., 223.)
        h = torch.clamp(h, 0., 223.)

        # Rescale pixel coords from the 224x224 image to this feature map size.
        x = w / (223. / (w_ - 1.))
        y = h / (223. / (h_ - 1.))
        feats = self._project(img_feat, x, y)
        return feats
class PointNet2(nn.Module):
    """
    Point cloud segmentation (set abstraction + feature propagation) in pointnet++
    Input:
        xyz: input points position [B, N, 3]
    output:
        point_feature: per-point features encode by pointnet [B, 128, N]
    """
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        # Set-abstraction encoder: progressively fewer points, wider features.
        self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.1, nsample=64, in_channel=3, mlp=[64, 64, 128], group_all=False)
        self.sa2 = PointNetSetAbstraction(npoint=384, radius=0.2, nsample=64, in_channel=128 + 3, mlp=[128, 128, 256], group_all=False)
        self.sa3 = PointNetSetAbstraction(npoint=128, radius=0.4, nsample=64, in_channel=256 + 3, mlp=[256, 256, 512], group_all=False)
        # Final level groups all remaining points into one global feature.
        self.sa4 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[512, 512, 1024], group_all=True)
        # Feature-propagation decoder: interpolate features back to all points.
        self.fp4 = PointNetFeaturePropagation(in_channel=512 + 1024, mlp=[512, 512])
        self.fp3 = PointNetFeaturePropagation(in_channel=256 + 512 , mlp=[512, 256])
        self.fp2 = PointNetFeaturePropagation(in_channel=128 + 256 , mlp=[256, 128])
        self.fp1 = PointNetFeaturePropagation(in_channel=0 + 128 , mlp=[128, 128, 128])

    def forward(self, xyz):
        xyz = xyz.transpose(2, 1)  # [B, C, N]
        l0_xyz = xyz
        l0_points = None

        # Encoder (downsampling) path.
        l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
        l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
        l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
        l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)

        # Decoder (upsampling) path with skip connections from the encoder.
        l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
        l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
        l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
        l0_points = self.fp1(l0_xyz, l1_xyz, l0_points, l1_points)

        return l0_points
class LinearDisplacementNet(nn.Module):
    """
    Predict the displacement from pointcloud features and image features
    Input:
        pc_features: poincloud features from pointnet2 [B, D, N]
        proj_features: image features from feature projection [B, N, D']
        noises: noises vector [B, N, n_length]
    Output:
        displacement: perpoint displacement [B, C, N]
    """
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg

        # 1x1 Conv1d layers act as a shared per-point MLP: 1120 -> ... -> 3.
        # 1120 must match D + D' + n_length of the concatenated input features.
        self.conv1 = nn.Conv1d(1120, 960, 1)
        self.bn1 = nn.BatchNorm1d(960)
        self.conv2 = nn.Conv1d(960, 512, 1)
        self.bn2 = nn.BatchNorm1d(512)
        self.conv3 = nn.Conv1d(512, 256, 1)
        self.bn3 = nn.BatchNorm1d(256)
        self.conv4 = nn.Conv1d(256, 128, 1)
        self.bn4 = nn.BatchNorm1d(128)
        self.conv5 = nn.Conv1d(128, 64, 1)
        self.bn5 = nn.BatchNorm1d(64)
        self.conv6 = nn.Conv1d(64, 3, 1)

    def forward(self, transform_xyz, proj_features, pc_features, noises):
        noises = noises.transpose(2, 1)  # [B, n_length, N]
        noises = utils.network_utils.var_or_cuda(noises)

        proj_features = proj_features.transpose(2, 1)  # [B, D', N]
        proj_features = utils.network_utils.var_or_cuda(proj_features)

        # concat the img features after each point features
        refine_features = torch.cat((pc_features, proj_features, noises), 1)  # [B, D+D'+n_length, N]

        refine_features = F.relu(self.bn1(self.conv1(refine_features)))
        refine_features = F.relu(self.bn2(self.conv2(refine_features)))
        refine_features = F.relu(self.bn3(self.conv3(refine_features)))
        refine_features = F.relu(self.bn4(self.conv4(refine_features)))
        refine_features = F.relu(self.bn5(self.conv5(refine_features)))
        displacements = self.conv6(refine_features)

        # FIX: F.sigmoid is deprecated (since PyTorch 0.4.1); torch.sigmoid is
        # the supported, identical function. Output is squashed into
        # (-RANGE_MAX, RANGE_MAX).
        displacements = torch.sigmoid(displacements) * self.cfg.REFINE.RANGE_MAX * 2 - self.cfg.REFINE.RANGE_MAX
        return displacements
class GRAPHX_REFINE_MODEL(nn.Module):
"""
Refine the point cloud based on the input image
Input:
xyz: point cloud from reconstruction model
Ouput:
update_pc: updated point cloud
"""
def __init__(self, cfg, in_channels, optimizer=None):
super().__init__()
self.cfg = cfg
# Refinement
self.transform_pc = TransformPC(cfg)
self.feature_projection = FeatureProjection(cfg)
self.pc_encode = PointNet2(cfg)
self.displacement_net = LinearDisplacementNet(cfg)
self.optimizer = None if optimizer is None else optimizer(self.parameters())
# emd loss
self.emd_dist = emd.emdModule()
if torch.cuda.is_available():
self.transform_pc = torch.nn.DataParallel(self.transform_pc, device_ids=cfg.CONST.DEVICE).cuda()
self.feature_projection = torch.nn.DataParallel(self.feature_projection, device_ids=cfg.CONST.DEVICE).cuda()
self.pc_encode = torch.nn.DataParallel(self.pc_encode, device_ids=cfg.CONST.DEVICE).cuda()
self.displacement_net = torch.nn.DataParallel(self.displacement_net, device_ids=cfg.CONST.DEVICE).cuda()
self.emd_dist = torch.nn.DataParallel(self.emd_dist, device_ids=cfg.CONST.DEVICE).cuda()
self.cuda()
def | (self, img_features, xyz, gt_pc, view_az, view_el):
'''
Input:
img_features
init pc: [B, N, 3]
gt pc: [B, N, 3]
view_az: [B]
view_el: [B]
Output:
loss
pred_pc: [B, N, 3]
'''
refine_pc = self.refine(img_features, xyz, view_az, view_el)
# compute reconstruction loss
emd_loss, _ = self.emd_dist(
refine_pc, gt_pc, eps=0.005, iters=50
)
rec_loss = torch.sqrt(emd_loss).mean(1).mean()
self.refiner_backward(rec_loss)
rec_loss_np = rec_loss.detach().item()
return rec_loss_np*1000
def valid_step(self, img_features, xyz, gt_pc, view_az, view_el):
    """Validation pass: refine ``xyz`` and score it against ``gt_pc``.

    Returns:
        (rec_loss * 1000, refined point cloud) — the same x1000 scaling used
        by the training step.
    """
    # refine the point cloud
    refine_pc = self.refine(img_features, xyz, view_az, view_el)

    # compute reconstruction loss (Earth Mover's Distance)
    emd_loss, _ = self.emd_dist(
        refine_pc, gt_pc, eps=0.005, iters=50
    )
    rec_loss = torch.sqrt(emd_loss).mean(1).mean()

    # BUG FIX: the original returned the undefined name `pred_pc`, which
    # raised NameError at runtime; the refined cloud is `refine_pc`.
    return rec_loss * 1000, refine_pc
def refine(self, img_features, xyz, view_az, view_el):
# img_features = self.img_enc(img)
transform_xyz = self.transform_pc(xyz, view_az, view_el)
proj_features = self.feature_projection(img_features, transform_xyz)
pc_features = self.pc_encode(transform_xyz)
noises = torch.normal(mean=0.0, std=1, size=(self.cfg.CONST.BATCH_SIZE, self.cfg.CONST.NUM_POINTS, self.cfg.REFINE.NOISE_LENGTH))
displacements = self.displacement_net(transform_xyz, proj_features, pc_features, noises)
displacements = displacements.transpose(2, 1)
refine_pc = xyz + displacements
return refine_pc
def refiner_backward(self, rec_loss):
self.train(True)
self.optimizer.zero_grad()
rec_loss.backward()
self.optimizer.step()
| train_step |
store.service.ts | /**
* OpenAPI Petstore
* This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters.
*
* OpenAPI spec version: 1.0.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
/* tslint:disable:no-unused-variable member-ordering */
import { Inject, Injectable, Optional } from '@angular/core';
import { Http, Headers, URLSearchParams } from '@angular/http';
import { RequestMethod, RequestOptions, RequestOptionsArgs } from '@angular/http';
import { Response, ResponseContentType } from '@angular/http';
import { CustomQueryEncoderHelper } from '../encoder';
import { Observable } from 'rxjs/Observable';
import '../rxjs-operators';
import { Order } from '../model/order';
import { BASE_PATH, COLLECTION_FORMATS } from '../variables';
import { Configuration } from '../configuration';
@Injectable()
export class StoreService {
protected basePath = 'http://petstore.swagger.io/v2';
public defaultHeaders = new Headers();
public configuration = new Configuration();
constructor(protected http: Http, @Optional()@Inject(BASE_PATH) basePath: string, @Optional() configuration: Configuration) {
if (configuration) {
this.configuration = configuration;
this.configuration.basePath = configuration.basePath || basePath || this.basePath;
} else {
this.configuration.basePath = basePath || this.basePath;
}
}
/**
* @param consumes string[] mime-types
* @return true: consumes contains 'multipart/form-data', false: otherwise
*/
private canConsumeForm(consumes: string[]): boolean {
    // A membership test is all that is needed here; indexOf keeps this
    // compatible with the ES5 target of this generated client.
    return consumes.indexOf('multipart/form-data') !== -1;
}
/**
* For valid response try integer IDs with value < 1000. Anything above 1000 or nonintegers will generate API errors
* @summary Delete purchase order by ID
* @param orderId ID of the order that needs to be deleted
*/
public deleteOrder(orderId: string, extraHttpRequestParams?: RequestOptionsArgs): Observable<{}> {
    // Delegate to the *WithHttpInfo variant and unwrap the payload:
    // a 204 carries no body; anything else is parsed as JSON.
    return this.deleteOrderWithHttpInfo(orderId, extraHttpRequestParams)
        .map((response: Response) =>
            response.status === 204 ? undefined : (response.json() || {}));
}
/**
* Returns a map of status codes to quantities
* @summary Returns pet inventories by status
*/
public getInventory(extraHttpRequestParams?: RequestOptionsArgs): Observable<{ [key: string]: number; }> {
    // Unwrap the raw HTTP response into the status-code -> quantity map;
    // a 204 means no body is available.
    return this.getInventoryWithHttpInfo(extraHttpRequestParams)
        .map((response: Response) =>
            response.status === 204 ? undefined : (response.json() || {}));
}
/**
* For valid response try integer IDs with value <= 5 or > 10. Other values will generated exceptions
* @summary Find purchase order by ID
* @param orderId ID of pet that needs to be fetched
*/
public getOrderById(orderId: number, extraHttpRequestParams?: RequestOptionsArgs): Observable<Order> {
    // Unwrap the raw HTTP response into an Order; a 204 carries no body.
    return this.getOrderByIdWithHttpInfo(orderId, extraHttpRequestParams)
        .map((response: Response) =>
            response.status === 204 ? undefined : (response.json() || {}));
}
/**
*
* @summary Place an order for a pet
* @param body order placed for purchasing the pet
*/
public placeOrder(body: Order, extraHttpRequestParams?: RequestOptionsArgs): Observable<Order> {
    // Unwrap the raw HTTP response into the created Order; a 204 carries
    // no body.
    return this.placeOrderWithHttpInfo(body, extraHttpRequestParams)
        .map((response: Response) =>
            response.status === 204 ? undefined : (response.json() || {}));
}
/**
* Delete purchase order by ID
* For valid response try integer IDs with value < 1000. Anything above 1000 or nonintegers will generate API errors
* @param orderId ID of the order that needs to be deleted
*/
public deleteOrderWithHttpInfo(orderId: string, extraHttpRequestParams?: RequestOptionsArgs): Observable<Response> {
    // Required path parameter — fail fast before building the request.
    if (orderId === null || orderId === undefined) {
        throw new Error('Required parameter orderId was null or undefined when calling deleteOrder.');
    }

    // Copy via toJSON so the shared default headers are not mutated.
    let headers = new Headers(this.defaultHeaders.toJSON()); // https://github.com/angular/angular/issues/6845

    // to determine the Accept header (none declared for this endpoint)
    const httpHeaderAccepts: string[] = [
    ];
    const httpHeaderAcceptSelected: string | undefined = this.configuration.selectHeaderAccept(httpHeaderAccepts);
    if (httpHeaderAcceptSelected !== undefined) {
        headers.set('Accept', httpHeaderAcceptSelected);
    }

    // to determine the Content-Type header (DELETE sends no body)
    const consumes: string[] = [
    ];

    let requestOptions: RequestOptionsArgs = new RequestOptions({
        method: RequestMethod.Delete,
        headers: headers,
        withCredentials:this.configuration.withCredentials
    });
    // Merge any caller-supplied overrides last; see issues#4037.
    if (extraHttpRequestParams) {
        requestOptions = (<any>Object).assign(requestOptions, extraHttpRequestParams);
    }

    return this.http.request(`${this.configuration.basePath}/store/order/${encodeURIComponent(String(orderId))}`, requestOptions);
}
/**
* Returns pet inventories by status
* Returns a map of status codes to quantities
*/
public getInventoryWithHttpInfo(extraHttpRequestParams?: RequestOptionsArgs): Observable<Response> {
    // Copy via toJSON so the shared default headers are not mutated.
    let headers = new Headers(this.defaultHeaders.toJSON()); // https://github.com/angular/angular/issues/6845

    // authentication (api_key) required
    if (this.configuration.apiKeys && this.configuration.apiKeys["api_key"]) {
        headers.set('api_key', this.configuration.apiKeys["api_key"]);
    }

    // to determine the Accept header
    const httpHeaderAccepts: string[] = [
        'application/json'
    ];
    const httpHeaderAcceptSelected: string | undefined = this.configuration.selectHeaderAccept(httpHeaderAccepts);
    if (httpHeaderAcceptSelected !== undefined) {
        headers.set('Accept', httpHeaderAcceptSelected);
    }

    // to determine the Content-Type header (GET sends no body)
    const consumes: string[] = [
    ];

    let requestOptions: RequestOptionsArgs = new RequestOptions({
        method: RequestMethod.Get,
        headers: headers,
        withCredentials:this.configuration.withCredentials
    });
    // Merge any caller-supplied overrides last; see issues#4037.
    if (extraHttpRequestParams) {
        requestOptions = (<any>Object).assign(requestOptions, extraHttpRequestParams);
    }

    return this.http.request(`${this.configuration.basePath}/store/inventory`, requestOptions);
}
/**
* Find purchase order by ID
* For valid response try integer IDs with value <= 5 or > 10. Other values will generated exceptions
* @param orderId ID of pet that needs to be fetched
*/
public getOrderByIdWithHttpInfo(orderId: number, extraHttpRequestParams?: RequestOptionsArgs): Observable<Response> {
    // Required path parameter — fail fast before building the request.
    if (orderId === null || orderId === undefined) {
        throw new Error('Required parameter orderId was null or undefined when calling getOrderById.');
    }

    // Copy via toJSON so the shared default headers are not mutated.
    let headers = new Headers(this.defaultHeaders.toJSON()); // https://github.com/angular/angular/issues/6845

    // to determine the Accept header
    const httpHeaderAccepts: string[] = [
        'application/xml',
        'application/json'
    ];
    const httpHeaderAcceptSelected: string | undefined = this.configuration.selectHeaderAccept(httpHeaderAccepts);
    if (httpHeaderAcceptSelected !== undefined) {
        headers.set('Accept', httpHeaderAcceptSelected);
    }

    // to determine the Content-Type header (GET sends no body)
    const consumes: string[] = [
    ];

    let requestOptions: RequestOptionsArgs = new RequestOptions({
        method: RequestMethod.Get,
        headers: headers,
        withCredentials:this.configuration.withCredentials
    });
    // Merge any caller-supplied overrides last; see issues#4037.
    if (extraHttpRequestParams) {
        requestOptions = (<any>Object).assign(requestOptions, extraHttpRequestParams);
    }

    return this.http.request(`${this.configuration.basePath}/store/order/${encodeURIComponent(String(orderId))}`, requestOptions);
}
/**
* Place an order for a pet
*
* @param body order placed for purchasing the pet
*/
public placeOrderWithHttpInfo(body: Order, extraHttpRequestParams?: RequestOptionsArgs): Observable<Response> {
    // Required body parameter — fail fast before building the request.
    if (body === null || body === undefined) {
        throw new Error('Required parameter body was null or undefined when calling placeOrder.');
    }

    // Copy via toJSON so the shared default headers are not mutated.
    let headers = new Headers(this.defaultHeaders.toJSON()); // https://github.com/angular/angular/issues/6845

    // to determine the Accept header
    const httpHeaderAccepts: string[] = [
        'application/xml',
        'application/json'
    ];
    const httpHeaderAcceptSelected: string | undefined = this.configuration.selectHeaderAccept(httpHeaderAccepts);
    if (httpHeaderAcceptSelected !== undefined) {
        headers.set('Accept', httpHeaderAcceptSelected);
    }

    // to determine the Content-Type header
    const consumes: string[] = [
    ];
    const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes);
    if (httpContentTypeSelected !== undefined) {
        headers.set('Content-Type', httpContentTypeSelected);
    }

    let requestOptions: RequestOptionsArgs = new RequestOptions({
        method: RequestMethod.Post,
        headers: headers,
        body: body == null ? '' : JSON.stringify(body), // https://github.com/angular/angular/issues/10612
        withCredentials:this.configuration.withCredentials
    });
    // Merge any caller-supplied overrides last; see issues#4037.
    if (extraHttpRequestParams) {
        requestOptions = (<any>Object).assign(requestOptions, extraHttpRequestParams);
    }

    return this.http.request(`${this.configuration.basePath}/store/order`, requestOptions);
}
} | |
triehash.rs | // Copyright 2020 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use criterion::{criterion_group, criterion_main, Criterion};
use vapory_types::H256;
use keccak_hasher::KeccakHasher;
use tiny_keccak::{Hasher, Keccak};
use trie_standardmap::{Alphabet, StandardMap, ValueMode};
use tetsy_triehash::trie_root;
/// Hash `input` with Keccak-256 and return the 32-byte digest.
fn keccak256(input: &[u8]) -> [u8; 32] {
	let mut hasher = Keccak::v256();
	hasher.update(input);
	let mut digest = [0u8; 32];
	hasher.finalize(&mut digest);
	digest
}
fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec<u8> |
/// Produce a pseudo-random byte string whose length lies in
/// `[min_count, min_count + diff_count]`, advancing `seed` in place.
fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec<u8> {
	assert!(min_count + diff_count <= 32);
	*seed = H256(keccak256(seed.as_bytes()));
	let len = min_count + (seed[31] as usize % (diff_count + 1));
	seed.as_bytes()[..len].to_vec()
}
/// Derive a pseudo-random value from `seed`, advancing it in place:
/// either a single byte or the full 32-byte seed, chosen by parity.
fn random_value(seed: &mut H256) -> Vec<u8> {
	*seed = H256(keccak256(seed.as_bytes()));
	if seed[0] % 2 == 1 {
		vec![seed[31]]
	} else {
		seed.as_bytes().to_vec()
	}
}
/// Benchmarks trie-root computation over key/value sets of varying shapes.
/// Each scenario builds its data once and times only `trie_root`.
fn bench_insertions(c: &mut Criterion) {
	// 1000 entries, 32-byte keys, values mirroring the keys.
	c.bench_function("32_mir_1k", |b| {
		let st = StandardMap {
			alphabet: Alphabet::All,
			min_key: 32,
			journal_key: 0,
			value_mode: ValueMode::Mirror,
			count: 1000,
		};
		let d = st.make();
		b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
	});

	// 1000 entries, 32-byte keys, random values.
	c.bench_function("32_ran_1k", |b| {
		let st = StandardMap {
			alphabet: Alphabet::All,
			min_key: 32,
			journal_key: 0,
			value_mode: ValueMode::Random,
			count: 1000,
		};
		let d = st.make();
		b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
	});

	// 6-byte keys over the full byte range (high-entropy keys).
	c.bench_function("six_high", |b| {
		let mut d: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
		let mut seed = H256::default();
		for _ in 0..1000 {
			let k = random_bytes(6, 0, &mut seed);
			let v = random_value(&mut seed);
			d.push((k, v))
		}
		b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
	});

	// 6-character keys over a 32-symbol alphabet (medium entropy).
	c.bench_function("six_mid", |b| {
		let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
		let mut d: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
		let mut seed = H256::default();
		for _ in 0..1000 {
			let k = random_word(alphabet, 6, 0, &mut seed);
			let v = random_value(&mut seed);
			d.push((k, v))
		}
		b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
	});

	// Variable-length (1..=6) keys over the same 32-symbol alphabet.
	c.bench_function("random_mid", |b| {
		let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
		let mut d: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
		let mut seed = H256::default();
		for _ in 0..1000 {
			let k = random_word(alphabet, 1, 5, &mut seed);
			let v = random_value(&mut seed);
			d.push((k, v))
		}
		b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
	});

	// 6-character keys over a tiny 6-symbol alphabet (many shared prefixes).
	c.bench_function("six_low", |b| {
		let alphabet = b"abcdef";
		let mut d: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
		let mut seed = H256::default();
		for _ in 0..1000 {
			let k = random_word(alphabet, 6, 0, &mut seed);
			let v = random_value(&mut seed);
			d.push((k, v))
		}
		b.iter(|| trie_root::<KeccakHasher, _, _, _>(d.clone()));
	});
}
criterion_group!(benches, bench_insertions);
criterion_main!(benches);
| {
assert!(min_count + diff_count <= 32);
*seed = H256(keccak256(seed.as_bytes()));
let r = min_count + (seed[31] as usize % (diff_count + 1));
let mut ret: Vec<u8> = Vec::with_capacity(r);
for i in 0..r {
ret.push(alphabet[seed[i] as usize % alphabet.len()]);
}
ret
} |
resources.js | (function () {
// Lightweight stand-in for a QueryResult whose execution failed before any
// result was produced. It mirrors the QueryResult getter API so callers can
// treat the two uniformly.
function QueryResultError(errorMessage) {
    this.errorMessage = errorMessage;
}

QueryResultError.prototype.getError = function () {
    return this.errorMessage;
};

// A QueryResultError always reports a terminal "failed" status.
QueryResultError.prototype.getStatus = function () {
    return 'failed';
};

// No data, log, or chart is ever available for a failed execution.
['getData', 'getLog', 'getChartData'].forEach(function (name) {
    QueryResultError.prototype[name] = function () {
        return null;
    };
});
var QueryResult = function ($resource, $timeout, $q) {
var QueryResultResource = $resource('/api/query_results/:id', {id: '@id'}, {'post': {'method': 'POST'}});
var Job = $resource('/api/jobs/:id', {id: '@id'});
var updateFunction = function (props) {
angular.extend(this, props);
if ('query_result' in props) {
this.status = "done";
this.filters = undefined;
this.filterFreeze = undefined;
var columnTypes = {};
// TODO: we should stop manipulating incoming data, and switch to relaying on the column type set by the backend.
// This logic is prone to errors, and better be removed. Kept for now, for backward compatability.
_.each(this.query_result.data.rows, function (row) {
_.each(row, function (v, k) {
if (angular.isNumber(v)) {
columnTypes[k] = 'float';
} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}T/)) {
row[k] = moment.utc(v);
columnTypes[k] = 'datetime';
} else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}$/)) {
row[k] = moment.utc(v);
columnTypes[k] = 'date';
} else if (typeof(v) == 'object' && v !== null) {
row[k] = JSON.stringify(v); | }, this);
_.each(this.query_result.data.columns, function(column) {
if (columnTypes[column.name]) {
if (column.type == null || column.type == 'string') {
column.type = columnTypes[column.name];
}
}
});
this.deferred.resolve(this);
} else if (this.job.status == 3) {
this.status = "processing";
} else {
this.status = undefined;
}
};
// A QueryResult starts in the "waiting" state with an unresolved promise;
// the promise resolves once update() receives a real query_result payload.
function QueryResult(props) {
    this.deferred = $q.defer();
    this.job = {};
    this.query_result = {};
    this.status = "waiting";
    this.filters = undefined;
    this.filterFreeze = undefined;
    this.updatedAt = moment();

    if (props) {
        updateFunction.apply(this, [props]);
    }
}

// Backend job status codes mapped to display states.
var statuses = {
    1: "waiting",
    2: "processing",
    3: "done",
    4: "failed"
}

QueryResult.prototype.update = updateFunction;
// Id of the stored result payload, or null before one has arrived.
QueryResult.prototype.getId = function () {
    var id = null;
    if ('query_result' in this) {
        id = this.query_result.id;
    }
    return id;
}

// Ask the backend to cancel the running job.
QueryResult.prototype.cancelExecution = function () {
    Job.delete({id: this.job.id});
}

// Prefer the locally-set status (e.g. "done" once a result arrived);
// otherwise map the backend job status code to a name.
QueryResult.prototype.getStatus = function () {
    return this.status || statuses[this.job.status];
}

QueryResult.prototype.getError = function () {
    // TODO: move this logic to the server...
    // The backend serializes a missing error as the string "None".
    if (this.job.error == "None") {
        return undefined;
    }

    return this.job.error;
}

// Execution log lines, or null when absent or empty.
QueryResult.prototype.getLog = function() {
    if (!this.query_result.data || !this.query_result.data.log || this.query_result.data.log.length == 0) {
        return null;
    }

    return this.query_result.data.log;
}

// Retrieval time of the result, falling back to the job's update time
// (seconds -> milliseconds) and finally the local construction time.
QueryResult.prototype.getUpdatedAt = function () {
    return this.query_result.retrieved_at || this.job.updated_at * 1000.0 || this.updatedAt;
}

QueryResult.prototype.getRuntime = function () {
    return this.query_result.runtime;
}

// Unfiltered result rows, or null before a result has arrived.
QueryResult.prototype.getRawData = function () {
    if (!this.query_result.data) {
        return null;
    }

    var data = this.query_result.data.rows;

    return data;
}
// Result rows after applying the current filter selections. Filtering is
// cached: rows are only re-filtered when a filter value changes.
QueryResult.prototype.getData = function () {
    if (!this.query_result.data) {
        return null;
    }

    // Concatenate every filter's current value; used as a cheap cache key.
    var filterValues = function (filters) {
        if (!filters) {
            return null;
        }

        return _.reduce(filters, function (str, filter) {
            return str + filter.current;
        }, "")
    }

    var filters = this.getFilters();
    var filterFreeze = filterValues(filters);

    // Re-filter only when the selections changed since the last call.
    if (this.filterFreeze != filterFreeze) {
        this.filterFreeze = filterFreeze;

        if (filters) {
            // A row is kept only if it matches every filter (AND semantics);
            // within one filter, any selected value matches (OR semantics).
            this.filteredData = _.filter(this.query_result.data.rows, function (row) {
                return _.reduce(filters, function (memo, filter) {
                    if (!_.isArray(filter.current)) {
                        filter.current = [filter.current];
                    };

                    return (memo && _.some(filter.current, function(v) {
                        // We compare with either the value or the String representation of the value,
                        // because Select2 casts true/false to "true"/"false".
                        return v == row[filter.name] || String(row[filter.name]) == v
                    }));
                }, true);
            });
        } else {
            this.filteredData = this.query_result.data.rows;
        }
    }

    return this.filteredData;
};
/**
* Helper function to add a point into a series
*/
QueryResult.prototype._addPointToSeries = function (point, seriesCollection, seriesName) {
if (seriesCollection[seriesName] == undefined) {
seriesCollection[seriesName] = {
name: seriesName,
type: 'column',
data: []
};
}
seriesCollection[seriesName]['data'].push(point);
};
// Build chart series from the (filtered) rows. Column roles come from the
// "name::type" / "name__type" suffix convention, or from an explicit
// `mapping` of column name -> role ('x', 'y', 'series', 'multi-filter',
// 'unused') when provided.
QueryResult.prototype.getChartData = function (mapping) {
    var series = {};

    _.each(this.getData(), function (row) {
        var point = {};
        var seriesName = undefined;
        var xValue = 0;
        var yValues = {};

        _.each(row, function (value, definition) {
            // Split the column into its bare name and its role suffix.
            var name = definition.split("::")[0] || definition.split("__")[0];
            var type = definition.split("::")[1] || definition.split("__")[1];
            if (mapping) {
                type = mapping[definition];
            }

            if (type == 'unused') {
                return;
            }

            if (type == 'x') {
                xValue = value;
                point[type] = value;
            }
            if (type == 'y') {
                // Null y-values are charted as zero.
                if (value == null) {
                    value = 0;
                }
                yValues[name] = value;
                point[type] = value;
            }

            if (type == 'series') {
                seriesName = String(value);
            }

            if (type == 'multiFilter' || type == 'multi-filter') {
                seriesName = String(value);
            }
        });

        if (seriesName === undefined) {
            // No series column: each y column becomes its own series.
            _.each(yValues, function (yValue, seriesName) {
                this._addPointToSeries({'x': xValue, 'y': yValue}, series, seriesName);
            }.bind(this));
        }
        else {
            this._addPointToSeries(point, series, seriesName);
        }
    }.bind(this));

    return _.values(series);
};
// Column metadata from the result payload, memoized after the first call.
QueryResult.prototype.getColumns = function () {
    if (this.columns == undefined && this.query_result.data) {
        this.columns = this.query_result.data.columns;
    }

    return this.columns;
}

// Just the column names, memoized after the first call.
QueryResult.prototype.getColumnNames = function () {
    if (this.columnNames == undefined && this.query_result.data) {
        this.columnNames = _.map(this.query_result.data.columns, function (v) {
            return v.name;
        });
    }

    return this.columnNames;
}
// Strip the role suffix from a column name: "name::type" or "name__type"
// becomes "name"; a name with no suffix is returned unchanged.
QueryResult.prototype.getColumnNameWithoutType = function (column) {
    var typeSplit;
    if (column.indexOf("::") != -1) {
        typeSplit = "::";
    } else if (column.indexOf("__") != -1) {
        // BUG FIX: the original tested `column.indexOf("__" != -1)`, i.e.
        // indexOf(true), which always returns -1 — and -1 is truthy, so this
        // branch ran unconditionally and the `return column` below was
        // unreachable. The intent is clearly a substring test for "__".
        typeSplit = "__";
    } else {
        return column;
    }

    var parts = column.split(typeSplit);
    // A column like "::filter" has an empty name part; use the suffix part.
    if (parts[0] == "" && parts.length == 2) {
        return parts[1];
    }

    return parts[0];
};
// "Clean" name: currently identical to the suffix-stripped name.
QueryResult.prototype.getColumnCleanName = function (column) {
    var name = this.getColumnNameWithoutType(column);

    return name;
}

// Human-friendly name: suffix stripped and each word capitalized.
QueryResult.prototype.getColumnFriendlyName = function (column) {
    return this.getColumnNameWithoutType(column).replace(/(?:^|\s)\S/g, function (a) {
        return a.toUpperCase();
    });
}

QueryResult.prototype.getColumnCleanNames = function () {
    return _.map(this.getColumnNames(), function (col) {
        return this.getColumnCleanName(col);
    }, this);
}

QueryResult.prototype.getColumnFriendlyNames = function () {
    return _.map(this.getColumnNames(), function (col) {
        return this.getColumnFriendlyName(col);
    }, this);
}
// Lazily build and return the filter descriptors for this result.
QueryResult.prototype.getFilters = function () {
    if (!this.filters) {
        this.prepareFilters();
    }

    return this.filters;
};

// Scan the columns for filter-role suffixes and collect, per filter, the
// distinct values present in the rows. The first row's value becomes the
// initial selection.
QueryResult.prototype.prepareFilters = function () {
    var filters = [];
    var filterTypes = ['filter', 'multi-filter', 'multiFilter'];
    _.each(this.getColumnNames(), function (col) {
        var type = col.split('::')[1] || col.split('__')[1];
        if (_.contains(filterTypes, type)) {
            // filter found
            var filter = {
                name: col,
                friendlyName: this.getColumnFriendlyName(col),
                values: [],
                multiple: (type=='multiFilter') || (type=='multi-filter')
            }
            filters.push(filter);
        }
    }, this);

    _.each(this.getRawData(), function (row) {
        _.each(filters, function (filter) {
            filter.values.push(row[filter.name]);
            if (filter.values.length == 1) {
                filter.current = row[filter.name];
            }
        })
    });

    _.each(filters, function(filter) {
        filter.values = _.uniq(filter.values);
    });

    this.filters = filters;
}
var refreshStatus = function (queryResult, query) {
Job.get({'id': queryResult.job.id}, function (response) {
queryResult.update(response);
if (queryResult.getStatus() == "processing" && queryResult.job.query_result_id && queryResult.job.query_result_id != "None") {
QueryResultResource.get({'id': queryResult.job.query_result_id}, function (response) {
queryResult.update(response);
});
} else if (queryResult.getStatus() != "failed") {
$timeout(function () {
refreshStatus(queryResult, query);
}, 3000);
}
})
}
QueryResult.getById = function (id) {
var queryResult = new QueryResult();
QueryResultResource.get({'id': id}, function (response) {
queryResult.update(response);
});
return queryResult;
};
QueryResult.prototype.toPromise = function() {
return this.deferred.promise;
}
QueryResult.get = function (data_source_id, query, maxAge, queryId) {
var queryResult = new QueryResult();
var params = {'data_source_id': data_source_id, 'query': query, 'max_age': maxAge};
if (queryId !== undefined) {
params['query_id'] = queryId;
};
QueryResultResource.post(params, function (response) {
queryResult.update(response);
if ('job' in response) {
refreshStatus(queryResult, query);
}
});
return queryResult;
}
return QueryResult;
};
var Query = function ($resource, QueryResult, DataSource) {
var Query = $resource('/api/queries/:id', {id: '@id'},
{
search: {
method: 'get',
isArray: true,
url: "/api/queries/search"
},
recent: {
method: 'get',
isArray: true,
url: "/api/queries/recent"
}});
Query.newQuery = function () {
return new Query({
query: "",
name: "New Query",
schedule: null,
user: currentUser
});
};
Query.collectParamsFromQueryString = function($location, query) {
var parameterNames = query.getParameters();
var parameters = {};
var queryString = $location.search();
_.each(parameterNames, function(param, i) {
var qsName = "p_" + param;
if (qsName in queryString) {
parameters[param] = queryString[qsName];
}
});
return parameters;
};
Query.prototype.getSourceLink = function () {
return '/queries/' + this.id + '/source';
};
Query.prototype.isNew = function() {
return this.id === undefined;
};
Query.prototype.hasDailySchedule = function() {
return (this.schedule && this.schedule.match(/\d\d:\d\d/) !== null);
};
Query.prototype.scheduleInLocalTime = function() {
var parts = this.schedule.split(':');
return moment.utc().hour(parts[0]).minute(parts[1]).local().format('HH:mm');
};
Query.prototype.getQueryResult = function (maxAge, parameters) {
if (!this.query) {
return;
}
var queryText = this.query;
var queryParameters = this.getParameters();
var paramsRequired = !_.isEmpty(queryParameters);
var missingParams = parameters === undefined ? queryParameters : _.difference(queryParameters, _.keys(parameters));
if (paramsRequired && missingParams.length > 0) {
var paramsWord = "parameter";
if (missingParams.length > 1) {
paramsWord = "parameters";
}
return new QueryResult({job: {error: "Missing values for " + missingParams.join(', ') + " "+paramsWord+".", status: 4}});
}
if (paramsRequired) {
queryText = Mustache.render(queryText, parameters);
// Need to clear latest results, to make sure we don't used results for different params.
this.latest_query_data = null;
this.latest_query_data_id = null;
}
if (this.latest_query_data && maxAge != 0) {
if (!this.queryResult) {
this.queryResult = new QueryResult({'query_result': this.latest_query_data});
}
} else if (this.latest_query_data_id && maxAge != 0) {
if (!this.queryResult) {
this.queryResult = QueryResult.getById(this.latest_query_data_id);
}
} else if (this.data_source_id) {
this.queryResult = QueryResult.get(this.data_source_id, queryText, maxAge, this.id);
} else {
return new QueryResultError("Please select data source to run this query.");
}
return this.queryResult;
};
Query.prototype.getQueryResultPromise = function() {
return this.getQueryResult().toPromise();
};
Query.prototype.getParameters = function() {
var parts = Mustache.parse(this.query);
var parameters = [];
var collectParams = function(parts) {
parameters = [];
_.each(parts, function(part) {
if (part[0] == 'name' || part[0] == '&') {
parameters.push(part[1]);
} else if (part[0] == '#') {
parameters = _.union(parameters, collectParams(part[4]));
}
});
return parameters;
};
parameters = collectParams(parts);
return parameters;
}
return Query;
};
var DataSource = function ($resource) {
var actions = {
'get': {'method': 'GET', 'cache': false, 'isArray': false},
'query': {'method': 'GET', 'cache': false, 'isArray': true},
'getSchema': {'method': 'GET', 'cache': true, 'isArray': true, 'url': '/api/data_sources/:id/schema'}
};
var DataSourceResource = $resource('/api/data_sources/:id', {id: '@id'}, actions);
return DataSourceResource;
};
var User = function ($resource, $http) {
var transformSingle = function(user) {
if (user.groups !== undefined) {
user.admin = user.groups.indexOf("admin") != -1;
}
};
var transform = $http.defaults.transformResponse.concat(function(data, headers) {
if (_.isArray(data)) {
_.each(data, transformSingle);
} else {
transformSingle(data);
}
return data;
});
var actions = {
'get': {method: 'GET', transformResponse: transform},
'save': {method: 'POST', transformResponse: transform},
'query': {method: 'GET', isArray: true, transformResponse: transform},
'delete': {method: 'DELETE', transformResponse: transform}
};
var UserResource = $resource('/api/users/:id', {id: '@id'}, actions);
return UserResource;
};
var AlertSubscription = function ($resource) {
var resource = $resource('/api/alerts/:alertId/subscriptions/:userId', {alertId: '@alert_id', userId: '@user.id'});
return resource;
};
var Alert = function ($resource, $http) {
var actions = {
save: {
method: 'POST',
transformRequest: [function(data) {
var newData = _.extend({}, data);
if (newData.query_id === undefined) {
newData.query_id = newData.query.id;
delete newData.query;
}
return newData;
}].concat($http.defaults.transformRequest)
}
};
var resource = $resource('/api/alerts/:id', {id: '@id'}, actions);
return resource;
};
var Widget = function ($resource, Query) {
var WidgetResource = $resource('/api/widgets/:id', {id: '@id'});
WidgetResource.prototype.getQuery = function () {
if (!this.query && this.visualization) {
this.query = new Query(this.visualization.query);
}
return this.query;
};
WidgetResource.prototype.getName = function () {
if (this.visualization) {
return this.visualization.query.name + ' (' + this.visualization.name + ')';
}
return _.str.truncate(this.text, 20);
};
return WidgetResource;
}
angular.module('redash.services')
.factory('QueryResult', ['$resource', '$timeout', '$q', QueryResult])
.factory('Query', ['$resource', 'QueryResult', 'DataSource', Query])
.factory('DataSource', ['$resource', DataSource])
.factory('Alert', ['$resource', '$http', Alert])
.factory('AlertSubscription', ['$resource', AlertSubscription])
.factory('Widget', ['$resource', 'Query', Widget])
.factory('User', ['$resource', '$http', User]);
})(); | }
}, this); |
constants.py | #coding:utf-8
# DeviceActiveListKeyHash = 'blue_earth.device.active.list' # 存放所有上线设备id {a:Time,b:Time}
#
DeviceCommandQueue = 'smartbox.device.command.queue.{device_type}.{device_id}'
#
# DeviceSequence = 'blue_earth.device.sequence'
DeviceChannelPub = 'smartbox.device.channel.pub.{device_id}' # 设备所有原始数据读取之后分发的通道
DeviceAppChannelPub = 'smartbox.device.app.channel.pub.{device_id}' # 设备监控应用通道,所有的前端系统将订阅此通道将监控信息推送到前端App
# DeviceChannelPubIoT = 'smartbox.device_channel_iot.{device_id}' # 推送到绿城+的发布通道
DeviceChannelPubIoT = '{device_id}' # 推送到绿城+的发布通道
DeviceChannelPubTraverseDown = 'smartbox.down.pub.{device_id}' # 设备下发控制命令的通道
DeviceChannelPubTraverseUp = 'smartbox.up.pub.{device_id}' # 设备上行消息分发通道
# DevicePositionLastest = 'blue_earth.device.position.lastest.{device_id}' # 设备当前的坐标和运行信息
#
# DevicePositionRequestTimeKey = 'blue_earth.device.position.request.time.{}' # 发送定位设备请求命令的时间
#
# DeviceLandingServerKey = 'blue_earth.device.landing_server.{}' # 记录设备接入服务器 {url,landing_time}
#
# DeviceShareCodeCreateTimeKey = 'blue_earth.device.share_code.create_time.{}' # 分享码的生成时间
#
MaxLiveTimeDeviceLandingServerKey = 60*8
DeviceAccessHttpAPI = 'smartbox.device.api_server.{}' # 记录设备接入服务器 {url,landing_time}
# DeviceActiveListKeyHash = 'smartbox.active_device_list' # 存放所有上线设备与接入服务器的关联关系
DeviceServerRel = 'smartbox.device_server_rel' # 存放所有上线设备与接入服务器的关联关系
|
AppRequestAuthCodeWidthIdsPrefix = 'smartbox.authcode.ids.'
AppRequestAuthCodePrefix = 'smartbox.authcode.data.' | SensorStatusHash= 'smartbox.sensor.status.{device_id}.{sensor_type}.{sensor_id}' # {device_id}_{sensor_type}_{sensor_id}'
DeviceStatusHash = 'smartbox.device.status.{device_id}' |
helpers_test.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"os"
"strings"
"testing"
"time"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/oci"
criconfig "github.com/containerd/containerd/pkg/cri/config"
containerstore "github.com/containerd/containerd/pkg/cri/store/container"
imagestore "github.com/containerd/containerd/pkg/cri/store/image"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/reference/docker"
"github.com/containerd/containerd/runtime/linux/runctypes"
runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
imagedigest "github.com/opencontainers/go-digest"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pelletier/go-toml"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGetUserFromImage tests the logic of getting image uid or user name of image user.
func TestGetUserFromImage(t *testing.T) {
newI64 := func(i int64) *int64 { return &i }
for c, test := range map[string]struct {
user string
uid *int64
name string
}{
"no gid": {
user: "0",
uid: newI64(0),
},
"uid/gid": {
user: "0:1",
uid: newI64(0),
},
"empty user": {
user: "",
},
"multiple separators": {
user: "1:2:3",
uid: newI64(1),
},
"root username": {
user: "root:root",
name: "root",
},
"username": {
user: "test:test",
name: "test",
},
} {
t.Logf("TestCase - %q", c)
actualUID, actualName := getUserFromImage(test.user)
assert.Equal(t, test.uid, actualUID)
assert.Equal(t, test.name, actualName)
}
}
func TestGetRepoDigestAndTag(t *testing.T) {
digest := imagedigest.Digest("sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582")
for desc, test := range map[string]struct {
ref string
schema1 bool
expectedRepoDigest string
expectedRepoTag string
}{
"repo tag should be empty if original ref has no tag": {
ref: "gcr.io/library/busybox@" + digest.String(),
expectedRepoDigest: "gcr.io/library/busybox@" + digest.String(),
},
"repo tag should not be empty if original ref has tag": {
ref: "gcr.io/library/busybox:latest",
expectedRepoDigest: "gcr.io/library/busybox@" + digest.String(),
expectedRepoTag: "gcr.io/library/busybox:latest",
},
"repo digest should be empty if original ref is schema1 and has no digest": {
ref: "gcr.io/library/busybox:latest",
schema1: true,
expectedRepoDigest: "",
expectedRepoTag: "gcr.io/library/busybox:latest",
},
"repo digest should not be empty if original ref is schema1 but has digest": {
ref: "gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59594",
schema1: true,
expectedRepoDigest: "gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59594",
expectedRepoTag: "",
},
} {
t.Logf("TestCase %q", desc)
named, err := docker.ParseDockerRef(test.ref)
assert.NoError(t, err)
repoDigest, repoTag := getRepoDigestAndTag(named, digest, test.schema1)
assert.Equal(t, test.expectedRepoDigest, repoDigest)
assert.Equal(t, test.expectedRepoTag, repoTag)
}
}
func TestBuildLabels(t *testing.T) {
imageConfigLabels := map[string]string{
"a": "z",
"d": "y",
"long-label": strings.Repeat("example", 10000),
}
configLabels := map[string]string{
"a": "b",
"c": "d",
}
newLabels := buildLabels(configLabels, imageConfigLabels, containerKindSandbox)
assert.Len(t, newLabels, 4)
assert.Equal(t, "b", newLabels["a"])
assert.Equal(t, "d", newLabels["c"])
assert.Equal(t, "y", newLabels["d"])
assert.Equal(t, containerKindSandbox, newLabels[containerKindLabel])
assert.NotContains(t, newLabels, "long-label")
newLabels["a"] = "e"
assert.Empty(t, configLabels[containerKindLabel], "should not add new labels into original label")
assert.Equal(t, "b", configLabels["a"], "change in new labels should not affect original label")
}
func TestParseImageReferences(t *testing.T) {
refs := []string{
"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
"gcr.io/library/busybox:1.2",
"sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
"arbitrary-ref",
}
expectedTags := []string{
"gcr.io/library/busybox:1.2",
}
expectedDigests := []string{"gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582"}
tags, digests := parseImageReferences(refs)
assert.Equal(t, expectedTags, tags)
assert.Equal(t, expectedDigests, digests)
}
func TestLocalResolve(t *testing.T) {
image := imagestore.Image{
ID: "sha256:c75bebcdd211f41b3a460c7bf82970ed6c75acaab9cd4c9a4e125b03ca113799",
ChainID: "test-chain-id-1",
References: []string{
"docker.io/library/busybox:latest",
"docker.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
},
Size: 10,
}
c := newTestCRIService()
var err error
c.imageStore, err = imagestore.NewFakeStore([]imagestore.Image{image})
assert.NoError(t, err)
for _, ref := range []string{
"sha256:c75bebcdd211f41b3a460c7bf82970ed6c75acaab9cd4c9a4e125b03ca113799",
"busybox",
"busybox:latest",
"busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
"library/busybox",
"library/busybox:latest",
"library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
"docker.io/busybox",
"docker.io/busybox:latest",
"docker.io/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
"docker.io/library/busybox",
"docker.io/library/busybox:latest",
"docker.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582",
} {
img, err := c.localResolve(ref)
assert.NoError(t, err)
assert.Equal(t, image, img)
}
img, err := c.localResolve("randomid")
assert.Equal(t, errdefs.IsNotFound(err), true)
assert.Equal(t, imagestore.Image{}, img)
}
func TestGenerateRuntimeOptions(t *testing.T) {
nilOpts := `
systemd_cgroup = true
[containerd]
no_pivot = true
default_runtime_name = "default"
[containerd.runtimes.legacy]
runtime_type = "` + plugin.RuntimeLinuxV1 + `"
[containerd.runtimes.runc]
runtime_type = "` + plugin.RuntimeRuncV1 + `"
[containerd.runtimes.runcv2]
runtime_type = "` + plugin.RuntimeRuncV2 + `"
`
nonNilOpts := `
systemd_cgroup = true
[containerd]
no_pivot = true
default_runtime_name = "default"
[containerd.runtimes.legacy]
runtime_type = "` + plugin.RuntimeLinuxV1 + `"
[containerd.runtimes.legacy.options]
Runtime = "legacy"
RuntimeRoot = "/legacy"
[containerd.runtimes.runc]
runtime_type = "` + plugin.RuntimeRuncV1 + `"
[containerd.runtimes.runc.options]
BinaryName = "runc"
Root = "/runc"
NoNewKeyring = true
[containerd.runtimes.runcv2]
runtime_type = "` + plugin.RuntimeRuncV2 + `"
[containerd.runtimes.runcv2.options]
BinaryName = "runc"
Root = "/runcv2"
NoNewKeyring = true
`
var nilOptsConfig, nonNilOptsConfig criconfig.Config
tree, err := toml.Load(nilOpts)
require.NoError(t, err)
err = tree.Unmarshal(&nilOptsConfig)
require.NoError(t, err)
require.Len(t, nilOptsConfig.Runtimes, 3)
tree, err = toml.Load(nonNilOpts)
require.NoError(t, err)
err = tree.Unmarshal(&nonNilOptsConfig)
require.NoError(t, err)
require.Len(t, nonNilOptsConfig.Runtimes, 3)
for desc, test := range map[string]struct {
r criconfig.Runtime
c criconfig.Config
expectedOptions interface{}
}{
"when options is nil, should return nil option for io.containerd.runc.v1": {
r: nilOptsConfig.Runtimes["runc"],
c: nilOptsConfig,
expectedOptions: nil,
},
"when options is nil, should return nil option for io.containerd.runc.v2": {
r: nilOptsConfig.Runtimes["runcv2"],
c: nilOptsConfig,
expectedOptions: nil,
},
"when options is nil, should use legacy fields for legacy runtime": {
r: nilOptsConfig.Runtimes["legacy"],
c: nilOptsConfig,
expectedOptions: &runctypes.RuncOptions{
SystemdCgroup: true,
},
},
"when options is not nil, should be able to decode for io.containerd.runc.v1": {
r: nonNilOptsConfig.Runtimes["runc"],
c: nonNilOptsConfig,
expectedOptions: &runcoptions.Options{
BinaryName: "runc",
Root: "/runc",
NoNewKeyring: true,
},
},
"when options is not nil, should be able to decode for io.containerd.runc.v2": {
r: nonNilOptsConfig.Runtimes["runcv2"],
c: nonNilOptsConfig,
expectedOptions: &runcoptions.Options{
BinaryName: "runc",
Root: "/runcv2",
NoNewKeyring: true,
},
},
"when options is not nil, should be able to decode for legacy runtime": {
r: nonNilOptsConfig.Runtimes["legacy"],
c: nonNilOptsConfig,
expectedOptions: &runctypes.RuncOptions{
Runtime: "legacy",
RuntimeRoot: "/legacy",
},
},
} {
t.Run(desc, func(t *testing.T) {
opts, err := generateRuntimeOptions(test.r, test.c)
assert.NoError(t, err)
assert.Equal(t, test.expectedOptions, opts)
})
}
}
func TestEnvDeduplication(t *testing.T) {
for desc, test := range map[string]struct {
existing []string
kv [][2]string
expected []string
}{
"single env": {
kv: [][2]string{
{"a", "b"},
},
expected: []string{"a=b"},
},
"multiple envs": {
kv: [][2]string{
{"a", "b"},
{"c", "d"},
{"e", "f"},
},
expected: []string{
"a=b",
"c=d",
"e=f",
},
},
"env override": {
kv: [][2]string{
{"k1", "v1"},
{"k2", "v2"},
{"k3", "v3"},
{"k3", "v4"},
{"k1", "v5"},
{"k4", "v6"},
},
expected: []string{
"k1=v5",
"k2=v2",
"k3=v4",
"k4=v6",
},
},
"existing env": {
existing: []string{
"k1=v1",
"k2=v2",
"k3=v3",
},
kv: [][2]string{
{"k3", "v4"},
{"k2", "v5"},
{"k4", "v6"},
},
expected: []string{
"k1=v1",
"k2=v5",
"k3=v4",
"k4=v6",
},
},
} {
t.Logf("TestCase %q", desc)
var spec runtimespec.Spec
if len(test.existing) > 0 {
spec.Process = &runtimespec.Process{
Env: test.existing,
}
}
for _, kv := range test.kv {
oci.WithEnv([]string{kv[0] + "=" + kv[1]})(context.Background(), nil, nil, &spec)
}
assert.Equal(t, test.expected, spec.Process.Env)
}
}
func TestPassThroughAnnotationsFilter(t *testing.T) {
for desc, test := range map[string]struct {
podAnnotations map[string]string
runtimePodAnnotations []string
passthroughAnnotations map[string]string
}{
"should support direct match": {
podAnnotations: map[string]string{"c": "d", "d": "e"},
runtimePodAnnotations: []string{"c"},
passthroughAnnotations: map[string]string{"c": "d"},
},
"should support wildcard match": {
podAnnotations: map[string]string{
"t.f": "j",
"z.g": "o",
"z": "o",
"y.ca": "b",
"y": "b",
},
runtimePodAnnotations: []string{"*.f", "z*g", "y.c*"},
passthroughAnnotations: map[string]string{
"t.f": "j",
"z.g": "o",
"y.ca": "b",
},
},
"should support wildcard match all": {
podAnnotations: map[string]string{
"t.f": "j",
"z.g": "o",
"z": "o",
"y.ca": "b",
"y": "b",
},
runtimePodAnnotations: []string{"*"},
passthroughAnnotations: map[string]string{
"t.f": "j",
"z.g": "o",
"z": "o",
"y.ca": "b",
"y": "b",
},
},
"should support match including path separator": {
podAnnotations: map[string]string{
"matchend.com/end": "1",
"matchend.com/end1": "2",
"matchend.com/1end": "3",
"matchmid.com/mid": "4",
"matchmid.com/mi1d": "5",
"matchmid.com/mid1": "6",
"matchhead.com/head": "7",
"matchhead.com/1head": "8",
"matchhead.com/head1": "9",
"matchall.com/abc": "10",
"matchall.com/def": "11",
"end/matchend": "12",
"end1/matchend": "13",
"1end/matchend": "14",
"mid/matchmid": "15",
"mi1d/matchmid": "16",
"mid1/matchmid": "17",
"head/matchhead": "18",
"1head/matchhead": "19",
"head1/matchhead": "20",
"abc/matchall": "21",
"def/matchall": "22",
"match1/match2": "23",
"nomatch/nomatch": "24",
},
runtimePodAnnotations: []string{
"matchend.com/end*",
"matchmid.com/mi*d",
"matchhead.com/*head",
"matchall.com/*",
"end*/matchend",
"mi*d/matchmid",
"*head/matchhead",
"*/matchall",
"match*/match*",
},
passthroughAnnotations: map[string]string{
"matchend.com/end": "1",
"matchend.com/end1": "2",
"matchmid.com/mid": "4",
"matchmid.com/mi1d": "5",
"matchhead.com/head": "7",
"matchhead.com/1head": "8",
"matchall.com/abc": "10",
"matchall.com/def": "11",
"end/matchend": "12",
"end1/matchend": "13",
"mid/matchmid": "15",
"mi1d/matchmid": "16",
"head/matchhead": "18",
"1head/matchhead": "19",
"abc/matchall": "21",
"def/matchall": "22",
"match1/match2": "23",
},
},
} {
t.Run(desc, func(t *testing.T) {
passthroughAnnotations := getPassthroughAnnotations(test.podAnnotations, test.runtimePodAnnotations)
assert.Equal(t, test.passthroughAnnotations, passthroughAnnotations)
})
}
}
func TestEnsureRemoveAllNotExist(t *testing.T) {
// should never return an error for a non-existent path
if err := ensureRemoveAll(context.Background(), "/non/existent/path"); err != nil {
t.Fatal(err)
}
}
func TestEnsureRemoveAllWithDir(t *testing.T) {
dir := t.TempDir()
if err := ensureRemoveAll(context.Background(), dir); err != nil { | }
func TestEnsureRemoveAllWithFile(t *testing.T) {
tmp, err := os.CreateTemp("", "test-ensure-removeall-with-dir")
if err != nil {
t.Fatal(err)
}
tmp.Close()
if err := ensureRemoveAll(context.Background(), tmp.Name()); err != nil {
t.Fatal(err)
}
}
// Helper function for setting up an environment to test PID namespace targeting.
func addContainer(c *criService, containerID, sandboxID string, PID uint32, createdAt, startedAt, finishedAt int64) error {
meta := containerstore.Metadata{
ID: containerID,
SandboxID: sandboxID,
}
status := containerstore.Status{
Pid: PID,
CreatedAt: createdAt,
StartedAt: startedAt,
FinishedAt: finishedAt,
}
container, err := containerstore.NewContainer(meta,
containerstore.WithFakeStatus(status),
)
if err != nil {
return err
}
return c.containerStore.Add(container)
}
func TestValidateTargetContainer(t *testing.T) {
testSandboxID := "test-sandbox-uid"
// The existing container that will be targeted.
testTargetContainerID := "test-target-container"
testTargetContainerPID := uint32(4567)
// A container that has finished running and cannot be targeted.
testStoppedContainerID := "stopped-target-container"
testStoppedContainerPID := uint32(6789)
// A container from another pod.
testOtherContainerSandboxID := "other-sandbox-uid"
testOtherContainerID := "other-target-container"
testOtherContainerPID := uint32(7890)
// Container create/start/stop times.
createdAt := time.Now().Add(-15 * time.Second).UnixNano()
startedAt := time.Now().Add(-10 * time.Second).UnixNano()
finishedAt := time.Now().Add(-5 * time.Second).UnixNano()
c := newTestCRIService()
// Create a target container.
err := addContainer(c, testTargetContainerID, testSandboxID, testTargetContainerPID, createdAt, startedAt, 0)
require.NoError(t, err, "error creating test target container")
// Create a stopped container.
err = addContainer(c, testStoppedContainerID, testSandboxID, testStoppedContainerPID, createdAt, startedAt, finishedAt)
require.NoError(t, err, "error creating test stopped container")
// Create a container in another pod.
err = addContainer(c, testOtherContainerID, testOtherContainerSandboxID, testOtherContainerPID, createdAt, startedAt, 0)
require.NoError(t, err, "error creating test container in other pod")
for desc, test := range map[string]struct {
targetContainerID string
expectError bool
}{
"target container in pod": {
targetContainerID: testTargetContainerID,
expectError: false,
},
"target stopped container in pod": {
targetContainerID: testStoppedContainerID,
expectError: true,
},
"target container does not exist": {
targetContainerID: "no-container-with-this-id",
expectError: true,
},
"target container in other pod": {
targetContainerID: testOtherContainerID,
expectError: true,
},
} {
t.Run(desc, func(t *testing.T) {
targetContainer, err := c.validateTargetContainer(testSandboxID, test.targetContainerID)
if test.expectError {
require.Error(t, err, "target should have been invalid but no error")
return
}
require.NoErrorf(t, err, "target should have been valid but got error")
assert.Equal(t, test.targetContainerID, targetContainer.ID, "returned target container does not have expected ID")
})
}
} | t.Fatal(err)
} |
replyCodes.js | // Generated by CoffeeScript 2.6.1
(function() {
/* jshint node:true */
/* jshint -W097 */
'use strict';
var _registerCodes, codeTagMap;
// Tags & Constant representation for reply codes
// authoritative documentation for all reply codes in current rets standard:
// http://www.reso.org/assets/RETS/Specifications/rets_1_8.pdf
codeTagMap = {};
// for readability, codes are presented in tag-code format, but we need to map them into code-tag format
// using multiple calls to this helper because some tag names are NOT globally unique
_registerCodes = function(tagCodeMap) {
var k, results, v;
results = [];
for (k in tagCodeMap) {
v = tagCodeMap[k];
results.push(codeTagMap[v] = k);
}
return results;
};
_registerCodes({
OPERATION_SUCCESSFUL: 0
});
_registerCodes({
SYSTEM_ERROR: 10000
});
_registerCodes({
ZERO_BALANCE: 20003,
BROKER_CODE_REQUIRED: 20012,
BROKER_CODE_INVALID: 20013,
DUPLICATE_LOGIN_PROHIBITED: 20022,
MISC_LOGIN_ERROR: 20036,
CLIENT_AUTHENTICATION_FAILED: 20037,
USER_AGENT_AUTHENTICATION_REQUIRED: 20041,
SERVER_TEMPORARILY_DISABLED: 20050
});
_registerCodes({
INSECURE_PASSWORD_DISALLOWED: 20140,
DUPLICATE_PASSWORD_DISALLOWED: 20141,
ENCRYPTED_USERNAME_INVALID: 20142
});
_registerCodes({
UNKNOWN_QUERY_FIELD: 20200,
NO_RECORDS_FOUND: 20201,
INVALID_SELECT: 20202,
MISC_SEARCH_ERROR: 20203,
INVALID_QUERY_SYNTAX: 20206,
UNAUTHORIZED_QUERY: 20207,
MAX_RECORDS_EXCEEDED: 20208,
TIMEOUT: 20209,
TOO_MANY_ACTIVE_QUERIES: 20210,
QUERY_TOO_COMPLEX: 20211,
INVALID_KEY_REQUEST: 20212,
INVALID_KEY: 20213
});
_registerCodes({
INVALID_PARAMETER: 20301,
RECORD_SAVE_ERROR: 20302,
MISC_UPDATE_ERROR: 20303,
WARNING_RESPONSE_NOT_GIVEN: 20311,
WARNING_RESPONSE_GIVEN: 20312
});
_registerCodes({
INVALID_RESOURCE: 20400,
INVALID_OBJECT_TYPE: 20401,
INVALID_IDENTIFIER: 20402,
NO_OBJECT_FOUND: 20403,
UNSUPPORTED_MIME_TYPE: 20406,
UNAUTHORIZED_RETRIEVAL: 20407,
RESOURCE_UNAVAILABLE: 20408,
OBJECT_UNAVAILABLE: 20409,
REQUEST_TOO_LARGE: 20410,
TIMEOUT: 20411,
TOO_MANY_ACTIVE_REQUESTS: 20412,
MISC_ERROR: 20413
});
_registerCodes({
INVALID_RESOURCE: 20500,
INVALID_METADATA_TYPE: 20501,
INVALID_IDENTIFIER: 20502,
NO_METADATA_FOUND: 20503,
UNSUPPORTED_MIME_TYPE: 20506,
UNAUTHORIZED_RETRIEVAL: 20507,
RESOURCE_UNAVAILABLE: 20508,
METADATA_UNAVAILABLE: 20509,
REQUEST_TOO_LARGE: 20510,
TIMEOUT: 20511,
TOO_MANY_ACTIVE_REQUESTS: 20512,
MISC_ERROR: 20513,
DTD_VERSION_UNAVAIL: 20514
});
| MISC_TRANSACTION_ERROR: 20702
});
_registerCodes({
UNKNOWN_RESOURCE: 20800,
INVALID_OBJECT_TYPE: 20801,
INVALID_IDENTIFIER: 20802,
INVALID_UPDATE_ACTION: 20803,
INCONSISTENT_REQUEST_PARAMETERS: 20804,
DELETE_TARGET_NOT_FOUND: 20805,
UNSUPPORTED_MIME_TYPE: 20806,
UNAUTHORIZED: 20807,
SOME_OBJECTS_NOT_DELETED: 20808,
BUSINESS_RULES_VIOLATION: 20809,
FILE_TOO_LARGE: 20810,
TIMEOUT: 20811,
TOO_MANY_ACTIVE_REQUESTS: 20812,
MISC_ERROR: 20813
});
module.exports = {
tagMap: codeTagMap,
getReplyTag: function(code) {
return codeTagMap[code];
}
};
}).call(this); | _registerCodes({
NOT_LOGGED_IN: 20701, |
__init__.py | """ | Handle the files generation.
""" |
|
sys$CategoryAttributeEnumValue.ts | import { BaseUuidEntity } from "./sys$BaseUuidEntity";
export class | extends BaseUuidEntity {
static NAME = "sys$CategoryAttributeEnumValue";
value?: string | null;
localizedValues?: string | null;
}
export type CategoryAttributeEnumValueViewName =
| "_base"
| "_local"
| "_minimal";
export type CategoryAttributeEnumValueView<
V extends CategoryAttributeEnumValueViewName
> = never;
| CategoryAttributeEnumValue |
template.rs | use std::collections::VecDeque;
use std::iter::FromIterator;
#[derive(PartialEq, Eq, Clone)]
enum Token {
Expression(String),
Statement(String),
Text(String),
}
#[derive(PartialEq, Eq)]
enum Context {
Comment,
Expression,
Statement,
StatementLine,
Text,
}
// Performs a lexical analysis on a given template.
fn tokenize(content: &str) -> Result<VecDeque<Token>, ()> {
let mut content = VecDeque::from_iter(content.chars());
let mut tokens = VecDeque::new();
let mut context = Context::Text;
let mut buf = String::new();
loop {
let curr =
match content.pop_front() {
Some(c) => c,
None => {
match context {
Context::Comment | Context::Expression | Context::Statement => return Err(()),
Context::StatementLine => {
if !buf.is_empty() {
tokens.push_back(Token::Statement(buf.trim().to_owned()));
}
},
Context::Text => {
if !buf.is_empty() {
tokens.push_back(Token::Text(buf.clone()));
}
},
} // context
break;
},
};
match context {
Context::Comment | Context::Expression | Context::Statement => {
// Directive.
if curr == '%' {
// '%'
match content.pop_front() {
Some('>') => {
// '%>'
// Change of mode -> text.
if !buf.is_empty() {
match context {
Context::Comment => {
if buf.ends_with("-") {
// Remove whitespaces until the next newline.
let mut cnt = 0;
for (_, c) in content.iter().enumerate() {
if *c == ' ' || *c == '\t' {
cnt += 1;
continue;
} else if *c == '\n' {
cnt += 1;
break;
} else {
cnt = 0;
break;
}
} // for
loop {
if cnt == 0 {
break;
}
content.pop_front();
cnt -= 1;
} // loop
}
},
Context::Expression => {
tokens.push_back(Token::Expression(buf.trim().to_owned()));
},
Context::Statement => {
if buf.ends_with("-") {
buf.pop();
// Remove whitespaces until the next newline.
let mut cnt = 0;
for (_, c) in content.iter().enumerate() {
if *c == ' ' || *c == '\t' {
cnt += 1;
continue;
} else if *c == '\n' {
cnt += 1;
break;
} else {
cnt = 0;
break;
}
} // for
loop {
if cnt == 0 {
break;
}
content.pop_front();
cnt -= 1;
} // loop
}
tokens.push_back(Token::Statement(buf.trim().to_owned()));
},
_ => panic!("wtf"),
} // match context
buf.clear();
}
context = Context::Text;
},
Some('%') => {
// '%%'
// Escape '%'.
buf.push('%');
},
Some(_) => return Err(()),
None => return Err(()),
} // match content.pop_front()
} else {
buf.push(curr);
}
},
Context::StatementLine => {
// Directive line.
if curr == '\n' {
// '\n'
// Change of mode -> text.
tokens.push_back(Token::Statement(buf.trim().to_owned()));
buf.clear();
context = Context::Text;
} else {
buf.push(curr);
}
},
Context::Text => {
// Text.
if curr == '<' {
// '<'
match content.pop_front() {
Some('%') => {
// '<%'
match content.pop_front() {
Some('%') => {
// '<%%'
// Escape '%'.
buf.push('<');
buf.push('%');
},
Some('#') => {
// '<%#'
// Change of mode -> comment.
if !buf.is_empty() {
tokens.push_back(Token::Text(buf.clone()));
buf.clear();
}
context = Context::Comment;
},
Some('=') => {
// '<%='
// Change of mode -> expression.
if !buf.is_empty() {
tokens.push_back(Token::Text(buf.clone()));
buf.clear();
}
context = Context::Expression;
},
Some('-') => {
// '<%-'
// Change of mode -> statement.
if !buf.is_empty() {
let mut cnt = 0;
let mut tmp = buf.clone();
loop {
match tmp.pop() {
Some(c) => {
if c == ' ' || c == '\t' {
cnt += 1;
continue;
} else if c == '\n' {
break;
} else {
cnt = 0;
break;
}
},
None => {
cnt = 0;
break;
},
}
} // loop
loop {
if cnt == 0 {
break;
}
buf.pop();
cnt -= 1;
} // loop
tokens.push_back(Token::Text(buf.clone()));
buf.clear();
}
context = Context::Statement;
},
Some(c) => {
// Change of mode -> statement.
if !buf.is_empty() {
tokens.push_back(Token::Text(buf.clone()));
buf.clear();
}
buf.push(c);
context = Context::Statement;
},
None => return Err(()),
} // match content.pop_front()
},
Some(c) => {
buf.push('<');
buf.push(c);
},
None => {
buf.push('<');
tokens.push_back(Token::Text(buf.clone()));
break;
},
} // match content.pop_front()
} else if curr == '%' {
// '%'
// Check the '%' starts a new line.
match buf.chars().last() {
Some('\n') => {
// '\n%'
tokens.push_back(Token::Text(buf.clone()));
buf.clear();
},
Some(_) => {
buf.push('%');
continue;
},
None => {
// No ongoing text.
},
} // match buf.chars().last()
// Check ahead.
match content.pop_front() {
Some('\n') => {
// '%\n'
// Nothing to do.
},
Some('%') => {
// '%%'
// Escape '%'.
buf.push('%');
},
Some(c) => {
// '%.'
buf.push(c);
context = Context::StatementLine;
},
None => break,
} // match content.pop_front()
} else {
buf.push(curr);
}
},
} // match context
} // loop
Ok(tokens)
}
// Converts a given template into lua code.
fn parse_template(content: &str) -> Result<String, ()> {
    // Lexing failure propagates as the same unit error type.
    let tokens = tokenize(content)?;

    let mut res = String::new();
    // The generated script accumulates output fragments in `_sb`.
    res.push_str("local _sb = {}\n");

    // Emit one Lua snippet per token, preserving token order.
    for token in tokens {
        match token {
            Token::Expression(e) => {
                #[cfg(feature = "debug")]
                eprintln!("[Expression] '{}'", e);
                // Expression results are appended to the output buffer.
                res.push_str("table.insert(_sb, ");
                res.push_str(&e);
                res.push_str(")\n");
            },
            Token::Statement(s) => {
                #[cfg(feature = "debug")]
                eprintln!("[Statement] '{}'", s);
                // Statements are copied into the script verbatim.
                res.push_str(&s);
                res.push('\n');
            },
            Token::Text(t) => {
                #[cfg(feature = "debug")]
                eprintln!("[Text] '{}'", t);
                // Literal text is wrapped in a Lua long string to avoid escaping.
                res.push_str("table.insert(_sb, [[\n");
                res.push_str(&t);
                res.push_str("]])\n");
            },
        }
    } // for

    // Hand the collected fragments to the runtime context.
    res.push_str("ctx:set_output(_sb)\n");
    Ok(res)
}
pub struct | {
pub content: String,
}
impl Template {
    /// Reads the template file at `path`, compiles it to Lua source, and
    /// returns the resulting `Template`.
    ///
    /// # Panics
    /// Panics if the file cannot be read or if the template fails to
    /// parse; the panic message names the offending path so failures are
    /// actionable.
    pub fn for_path(path: &str) -> Self {
        let template_content = match std::fs::read_to_string(path) {
            Ok(c) => c,
            // Include the path in the message; the bare io::Error alone
            // does not always identify which file was being read.
            Err(e) => panic!("cannot read template '{}': {}", path, e),
        };
        // parse_template's error type is `()`, so supply context here.
        let content = parse_template(&template_content)
            .unwrap_or_else(|_| panic!("cannot parse template '{}'", path));
        Template { content }
    }
}
| Template |
t1.py | weight=1
a=_State('a', name='var1', shared=True)
def run():
@_do
def _():
print(a.val)
sleep(10)
a.val = 5
@_do
def _():
print(a.val)
sleep(10)
a.val = 8
@_do
def _():
| print(a.val) |
|
doc.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
// Package videointelligence is an auto-generated package for the
// Cloud Video Intelligence API.
//
// Detects objects, explicit content, and scene changes in videos. It also
// specifies the region for annotation and transcribes speech to text.
// Supports both asynchronous API and streaming API.
//
// Example usage
//
// To get started with this package, create a client.
// ctx := context.Background()
// c, err := videointelligence.NewClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
// The client will use your default application credentials. Clients should be reused instead of created as needed.
// The methods of Client are safe for concurrent use by multiple goroutines.
// The returned client must be Closed when it is done being used.
//
// Using the Client
//
// The following is an example of making an API call with the newly created client.
//
// ctx := context.Background()
// c, err := videointelligence.NewClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
// req := &videointelligencepb.AnnotateVideoRequest{
// // TODO: Fill request struct fields.
// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/videointelligence/v1#AnnotateVideoRequest.
// }
// op, err := c.AnnotateVideo(ctx, req)
// if err != nil {
// // TODO: Handle error.
// }
//
// resp, err := op.Wait(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// // TODO: Use resp.
// _ = resp
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit https://pkg.go.dev/cloud.google.com/go.
package videointelligence // import "cloud.google.com/go/videointelligence/apiv1"
import (
"context"
"os"
"runtime"
"strconv"
"strings"
"unicode"
"google.golang.org/api/option"
"google.golang.org/grpc/metadata"
)
// For more information on implementing a client constructor hook, see
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
type clientHookParams struct{}
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
const versionClient = "20220108"
// insertMetadata merges the given metadata values into any outgoing
// metadata already attached to ctx and returns the resulting context.
// The existing metadata is copied first so the original is not mutated.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	existing, _ := metadata.FromOutgoingContext(ctx)
	merged := existing.Copy()
	for _, md := range mds {
		for key, vals := range md {
			merged[key] = append(merged[key], vals...)
		}
	}
	return metadata.NewOutgoingContext(ctx, merged)
}
func checkDisableDeadlines() (bool, error) {
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
if !ok {
return false, nil
}
b, err := strconv.ParseBool(raw)
return b, err
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	const cloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
	return []string{cloudPlatformScope}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func | () string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return !strings.ContainsRune("0123456789.", r)
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
| versionGo |
data3_cmb4.rs | #[doc = "Register `DATA3_CMB4` reader"]
pub struct R(crate::R<DATA3_CMB4_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<DATA3_CMB4_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<DATA3_CMB4_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<DATA3_CMB4_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `DATA3_CMB4` writer"]
pub struct W(crate::W<DATA3_CMB4_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<DATA3_CMB4_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<DATA3_CMB4_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<DATA3_CMB4_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `BYTE7` reader - Data Byte 7"]
pub struct BYTE7_R(crate::FieldReader<u8, u8>);
impl BYTE7_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
BYTE7_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for BYTE7_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `BYTE7` writer - Data Byte 7"]
pub struct BYTE7_W<'a> {
w: &'a mut W,
}
impl<'a> BYTE7_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // The BYTE7 field occupies bits 8..=15 of the register: clear the
        // field, then OR in the masked value at its offset.
        const MASK: u32 = 0xff;
        const OFFSET: u32 = 8;
        self.w.bits = (self.w.bits & !(MASK << OFFSET)) | ((value as u32 & MASK) << OFFSET);
        self.w
    }
}
#[doc = "Field `BYTE8` reader - Data Byte 8"]
pub struct BYTE8_R(crate::FieldReader<u8, u8>);
impl BYTE8_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
BYTE8_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for BYTE8_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `BYTE8` writer - Data Byte 8"]
pub struct BYTE8_W<'a> {
w: &'a mut W,
}
impl<'a> BYTE8_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // The BYTE8 field occupies bits 0..=7: clear the low byte and OR
        // in the masked value.
        const MASK: u32 = 0xff;
        self.w.bits = (self.w.bits & !MASK) | (value as u32 & MASK);
        self.w
    }
}
impl R {
    #[doc = "Bits 8:15 - Data Byte 7"]
    #[inline(always)]
    pub fn byte7(&self) -> BYTE7_R {
        // Shift the field down to bit 0, then truncate to its 8-bit width.
        BYTE7_R::new(((self.bits >> 8) & 0xff) as u8)
    }
    #[doc = "Bits 0:7 - Data Byte 8"]
    #[inline(always)]
    pub fn byte8(&self) -> BYTE8_R {
        // Field already starts at bit 0; mask to its 8-bit width.
        BYTE8_R::new((self.bits & 0xff) as u8)
    }
}
impl W {
#[doc = "Bits 8:15 - Data Byte 7"]
#[inline(always)]
pub fn byte7(&mut self) -> BYTE7_W {
BYTE7_W { w: self }
}
#[doc = "Bits 0:7 - Data Byte 8"]
#[inline(always)]
pub fn | (&mut self) -> BYTE8_W {
BYTE8_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "CAN Frame Data Word 3\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [data3_cmb4](index.html) module"]
pub struct DATA3_CMB4_SPEC;
impl crate::RegisterSpec for DATA3_CMB4_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [data3_cmb4::R](R) reader structure"]
impl crate::Readable for DATA3_CMB4_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [data3_cmb4::W](W) writer structure"]
impl crate::Writable for DATA3_CMB4_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets DATA3_CMB4 to value 0"]
impl crate::Resettable for DATA3_CMB4_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| byte8 |
ratelimit_test.go | package middleware
import (
"context"
"errors"
"net"
"testing"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/spiffe/spire/pkg/server/api"
"github.com/spiffe/spire/pkg/server/api/rpccontext"
"github.com/spiffe/spire/test/clock"
"github.com/spiffe/spire/test/spiretest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TestNoLimit verifies that the NoLimit middleware performs no rate
// limiting and never instantiates an underlying limiter.
func TestNoLimit(t *testing.T) {
	fakes := NewFakeLimiters()

	middleware := NoLimit()
	// Any count must pass since no limiting is applied.
	require.NoError(t, middleware.RateLimit(context.Background(), 99))

	// NoLimit never creates a rate limiter.
	assert.Equal(t, 0, fakes.Count)
}
// TestDisabledLimit verifies that the DisabledLimit middleware performs
// no rate limiting and never instantiates an underlying limiter.
func TestDisabledLimit(t *testing.T) {
	fakes := NewFakeLimiters()

	middleware := DisabledLimit()
	// Any count must pass since the limit is disabled.
	require.NoError(t, middleware.RateLimit(context.Background(), 99))

	// DisabledLimit never creates a rate limiter.
	assert.Equal(t, 0, fakes.Count)
}
// TestPerCallLimit exercises the per-call rate limiter: a call whose
// count exceeds the burst size is rejected with ResourceExhausted before
// the limiter is consulted, while a call within the burst waits on the
// limiter exactly once.
func TestPerCallLimit(t *testing.T) {
	limiters := NewFakeLimiters()

	m := PerCallLimit(1)

	// Exceeds burst size.
	err := m.RateLimit(context.Background(), 2)
	spiretest.RequireGRPCStatus(t, err, codes.ResourceExhausted, "rate (2) exceeds burst size (1)")

	// Within burst size.
	require.NoError(t, m.RateLimit(context.Background(), 1))

	// There should be a single rate limiter. WaitN should have only been
	// called once for the call that didn't exceed the burst size.
	assert.Equal(t, 1, limiters.Count)
	assert.Equal(t, []WaitNEvent{
		{ID: 1, Count: 1},
	}, limiters.WaitNEvents)
}
// TestPerIPLimit exercises the per-IP rate limiter: non-TCP/IP callers
// are exempt, each distinct IP gets its own limiter, and a burst-size
// failure does not touch the limiter.
func TestPerIPLimit(t *testing.T) {
	limiters := NewFakeLimiters()

	m := PerIPLimit(10)

	// Does not rate limit non-TCP/IP callers
	err := m.RateLimit(unixCallerContext(), 11)
	require.NoError(t, err)

	// Once exceeding burst size for 1.1.1.1
	err = m.RateLimit(tcpCallerContext("1.1.1.1"), 11)
	spiretest.RequireGRPCStatus(t, err, codes.ResourceExhausted, "rate (11) exceeds burst size (10)")

	// Once within burst size for 1.1.1.1
	require.NoError(t, m.RateLimit(tcpCallerContext("1.1.1.1"), 1))

	// Twice within burst size for 2.2.2.2
	require.NoError(t, m.RateLimit(tcpCallerContext("2.2.2.2"), 2))
	require.NoError(t, m.RateLimit(tcpCallerContext("2.2.2.2"), 3))

	// There should be two rate limiters; 1.1.1.1, and 2.2.2.2
	assert.Equal(t, 2, limiters.Count)

	// WaitN should have only been called once for 1.1.1.1 (burst failure does
	// not result in a call to WaitN) and twice for 2.2.2.2.
	assert.Equal(t, []WaitNEvent{
		{ID: 1, Count: 1},
		{ID: 2, Count: 2},
		{ID: 2, Count: 3},
	}, limiters.WaitNEvents)
}
// TestPerIPLimitGC exercises the two-generation garbage collection of
// per-IP limiters: limiters used since the last GC interval survive into
// the "previous" set and are revived on use, while limiters idle for a
// full interval are dropped and recreated on next use.
func TestPerIPLimitGC(t *testing.T) {
	mockClk, restoreClk := setupClock(t)
	defer restoreClk()

	limiters := NewFakeLimiters()

	m := PerIPLimit(2)

	// Create limiters for both 1.1.1.1 and 2.2.2.2
	require.NoError(t, m.RateLimit(tcpCallerContext("1.1.1.1"), 1))
	require.NoError(t, m.RateLimit(tcpCallerContext("2.2.2.2"), 1))
	require.Equal(t, 2, limiters.Count)

	// Advance past the GC time and create for limiter for 3.3.3.3. This should
	// move both 1.1.1.1 and 2.2.2.2 into the "previous" set. There should be
	// three total limiters now.
	mockClk.Add(gcInterval)
	require.NoError(t, m.RateLimit(tcpCallerContext("3.3.3.3"), 1))
	require.Equal(t, 3, limiters.Count)

	// Now use the 1.1.1.1 limiter. This should transition it into the
	// "current" set. Assert that no new limiter is created.
	require.NoError(t, m.RateLimit(tcpCallerContext("1.1.1.1"), 1))
	require.Equal(t, 3, limiters.Count)

	// Advance to the next GC time. Create a limiter for 4.4.4.4. This should
	// cause 2.2.2.2 to be removed. 1.1.1.1 and 3.3.3.3 will go into the
	// "previous set".
	mockClk.Add(gcInterval)
	require.NoError(t, m.RateLimit(tcpCallerContext("4.4.4.4"), 1))
	require.Equal(t, 4, limiters.Count)

	// Use all of the limiters but 2.2.2.2 and make sure the limiter count is stable.
	require.NoError(t, m.RateLimit(tcpCallerContext("1.1.1.1"), 1))
	require.NoError(t, m.RateLimit(tcpCallerContext("3.3.3.3"), 1))
	require.NoError(t, m.RateLimit(tcpCallerContext("4.4.4.4"), 1))
	require.Equal(t, 4, limiters.Count)

	// Now do 2.2.2.2. A new limiter will be created for 2.2.2.2, since the
	// limiter for 2.2.2.2 was previously removed after the last GC period.
	require.NoError(t, m.RateLimit(tcpCallerContext("2.2.2.2"), 1))
	require.Equal(t, 5, limiters.Count)
}
func TestRateLimits(t *testing.T) {
for _, tt := range []struct {
name string
method string
prepareCtx func(context.Context) context.Context
rateLimitCount int
returnErr error
downstreamErr error
expectLogs []spiretest.LogEntry
expectCode codes.Code
expectMsg string
}{
{
name: "RPC fails if method not configured for rate limiting",
method: "/fake.Service/Whoopsie",
expectCode: codes.Internal,
expectMsg: `rate limiting misconfigured for "/fake.Service/Whoopsie"`,
expectLogs: []spiretest.LogEntry{
{
Level: logrus.ErrorLevel,
Message: "Rate limiting misconfigured; this is a bug",
},
},
},
{
name: "logs when rate limiter not used by handler",
method: "/fake.Service/WithLimit",
expectCode: codes.OK,
expectLogs: []spiretest.LogEntry{
{
Level: logrus.ErrorLevel,
Message: "Rate limiter went unused; this is a bug",
},
},
},
{
name: "does not log if handler returns invalid argument",
method: "/fake.Service/WithLimit",
returnErr: status.Error(codes.InvalidArgument, "ohno!"),
expectCode: codes.InvalidArgument,
expectMsg: `ohno!`,
},
{
name: "does not log if handler was never invoked",
method: "/fake.Service/WithLimit",
downstreamErr: status.Error(codes.PermissionDenied, "permission denied"),
expectCode: codes.PermissionDenied,
expectMsg: `permission denied`,
},
{
name: "logs when handler with no limit tries to rate limit",
method: "/fake.Service/NoLimit",
rateLimitCount: 1,
expectCode: codes.OK,
expectLogs: []spiretest.LogEntry{
{
Level: logrus.ErrorLevel,
Message: "Rate limiter used unexpectedly; this is a bug",
},
},
},
{
name: "does not log when handler with disabled limit tries to rate limit",
method: "/fake.Service/DisabledLimit",
rateLimitCount: 1,
expectCode: codes.OK,
},
{
name: "logs when handler with disabled limit does not rate limit",
method: "/fake.Service/DisabledLimit",
expectCode: codes.OK,
expectLogs: []spiretest.LogEntry{
{
Level: logrus.ErrorLevel,
Message: "Disabled rate limiter went unused; this is a bug",
},
},
},
{
name: "does not log when rate limiter not used by unlimited handler",
method: "/fake.Service/NoLimit",
expectCode: codes.OK,
},
{
name: "does not log when rate limiter used by limited handler",
method: "/fake.Service/WithLimit",
rateLimitCount: 1,
},
{
name: "returns resource exhausted when rate limiting fails",
method: "/fake.Service/WithLimit",
rateLimitCount: 3,
expectCode: codes.ResourceExhausted,
expectMsg: "rate (3) exceeds burst size (2)",
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
log, hook := test.NewNullLogger()
ctx := rpccontext.WithLogger(context.Background(), log)
if tt.prepareCtx != nil {
ctx = tt.prepareCtx(ctx)
}
serverInfo := &grpc.UnaryServerInfo{FullMethod: tt.method}
handler := func(ctx context.Context, _ interface{}) (interface{}, error) {
if tt.rateLimitCount > 0 {
if err := rpccontext.RateLimit(ctx, tt.rateLimitCount); err != nil {
return nil, err
}
}
if tt.returnErr != nil {
return nil, tt.returnErr
}
return struct{}{}, nil
}
unaryInterceptor := UnaryInterceptor(Chain(
WithRateLimits(
map[string]api.RateLimiter{
"/fake.Service/NoLimit": NoLimit(),
"/fake.Service/DisabledLimit": DisabledLimit(),
"/fake.Service/WithLimit": PerCallLimit(2),
},
),
// Install a middleware downstream so that we can test what
// happens in postprocess if the handler is never invoked.
Preprocess(func(ctx context.Context, fullMethod string) (context.Context, error) {
return ctx, tt.downstreamErr
}),
))
resp, err := unaryInterceptor(ctx, struct{}{}, serverInfo, handler)
spiretest.AssertGRPCStatus(t, err, tt.expectCode, tt.expectMsg)
if err == nil | else {
assert.Nil(t, resp)
}
spiretest.AssertLogs(t, hook.AllEntries(), tt.expectLogs)
})
}
}
// WaitNEvent records a single WaitN call made against a fake limiter.
type WaitNEvent struct {
	ID    int // 1-based creation index of the limiter that was waited on
	Count int // number of tokens requested in the WaitN call
}
// FakeLimiters replaces the package's raw rate limiter constructor and
// records every limiter created and every WaitN call made through them.
type FakeLimiters struct {
	Count       int          // number of fake limiters created so far
	WaitNEvents []WaitNEvent // ordered log of WaitN calls across all limiters
}
// NewFakeLimiters installs a fake limiter factory in place of the
// package-level constructor and returns the recorder that tracks all
// limiters subsequently created.
func NewFakeLimiters() *FakeLimiters {
	fakes := new(FakeLimiters)
	newRawRateLimiter = fakes.newRawRateLimiter
	return fakes
}
// newRawRateLimiter is the FakeLimiters replacement for the package-level
// limiter constructor; it assigns the next sequential ID and returns a
// fakeLimiter that reports WaitN calls back to ls.
func (ls *FakeLimiters) newRawRateLimiter(limit rate.Limit, burst int) rawRateLimiter {
	ls.Count++
	limiter := &fakeLimiter{
		id:    ls.Count,
		waitN: ls.waitN,
		limit: limit,
		burst: burst,
	}
	return limiter
}
// waitN appends a record of a WaitN call by limiter id for count tokens.
// It never blocks or fails.
func (ls *FakeLimiters) waitN(ctx context.Context, id, count int) error {
	event := WaitNEvent{ID: id, Count: count}
	ls.WaitNEvents = append(ls.WaitNEvents, event)
	return nil
}
// fakeLimiter is a test double for rawRateLimiter that forwards WaitN
// bookkeeping back to its parent FakeLimiters.
type fakeLimiter struct {
	id    int // unique, 1-based creation index assigned by FakeLimiters
	waitN func(ctx context.Context, id, count int) error // records the call on the parent
	limit rate.Limit
	burst int
}
// WaitN validates invariants the middleware is expected to uphold and
// then records the call via the parent FakeLimiters.
func (l *fakeLimiter) WaitN(ctx context.Context, count int) error {
	if l.limit == rate.Inf {
		// Limiters should never be unlimited.
		return errors.New("unexpected infinite limit on limiter")
	}
	if count > l.burst {
		// the waitN() function should have already taken care of this check
		// in order to provide nicer error messaging than that provided by
		// the rate package.
		return errors.New("exceeding burst should have already been handled")
	}
	return l.waitN(ctx, l.id, count)
}
// Limit returns the rate limit the fake limiter was constructed with.
func (l *fakeLimiter) Limit() rate.Limit {
	return l.limit
}

// Burst returns the burst size the fake limiter was constructed with.
func (l *fakeLimiter) Burst() int {
	return l.burst
}
// unixCallerContext returns a context whose caller address is a UNIX
// domain socket, i.e. a non-TCP/IP caller exempt from per-IP limiting.
func unixCallerContext() context.Context {
	addr := &net.UnixAddr{
		Net:  "unix",
		Name: "/not/a/real/path.sock",
	}
	return rpccontext.WithCallerAddr(context.Background(), addr)
}
// tcpCallerContext returns a context whose caller address is the given
// TCP/IP address, subject to per-IP limiting.
func tcpCallerContext(ip string) context.Context {
	addr := &net.TCPAddr{IP: net.ParseIP(ip)}
	return rpccontext.WithCallerAddr(context.Background(), addr)
}
// setupClock swaps the package clock for a mock and returns the mock
// together with a function that restores the original clock.
func setupClock(t *testing.T) (*clock.Mock, func()) {
	mock := clock.NewMock(t)
	saved := clk
	clk = mock
	restore := func() { clk = saved }
	return mock, restore
}
| {
assert.NotNil(t, resp)
} |
test_button07.py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
    # Register the reference .xlsm file that generated output is
    # compared against by assertExcelEqual().
    self.set_filename('button07.xlsm')
def test_create_file(self):
|
def test_create_file_explicit_vba_names(self):
    """Test the creation of a simple XlsxWriter file."""
    workbook = Workbook(self.got_filename)
    worksheet = workbook.add_worksheet()

    # Assign the VBA code names explicitly rather than relying on the
    # defaults applied when a VBA project is attached.
    workbook.set_vba_name('ThisWorkbook')
    worksheet.set_vba_name('Sheet1')

    button_options = {'macro': 'say_hello', 'caption': 'Hello'}
    worksheet.insert_button('C2', button_options)

    workbook.add_vba_project(self.vba_dir + 'vbaProject02.bin')
    workbook.close()

    self.assertExcelEqual()
def test_create_file_implicit_vba_names(self):
    """Test the creation of a simple XlsxWriter file."""
    workbook = Workbook(self.got_filename)
    worksheet = workbook.add_worksheet()

    # No explicit set_vba_name() calls here: the workbook/worksheet VBA
    # code names fall back to defaults when the VBA project is added.
    worksheet.insert_button('C2', {'macro': 'say_hello',
                                   'caption': 'Hello'})

    workbook.add_vba_project(self.vba_dir + 'vbaProject02.bin')
    workbook.close()

    self.assertExcelEqual()
| """Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
workbook.set_vba_name()
worksheet.set_vba_name()
worksheet.insert_button('C2', {'macro': 'say_hello',
'caption': 'Hello'})
workbook.add_vba_project(self.vba_dir + 'vbaProject02.bin')
workbook.close()
self.assertExcelEqual() |
service.py | import re
import html
import json
import requests
from bs4 import BeautifulSoup
class BamahutExporterService:
def __init__(self):
    """Create a requests session presenting a desktop-browser User-Agent."""
    self.session = requests.Session()
    # Browser-like UA header; presumably the forum rejects or throttles
    # default library user agents — TODO confirm against the endpoint.
    self.session.headers.update({'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'})
def is_last_page(self, page, response):
    """Return True when `page` is beyond the last page number embedded
    in the thread's HTML (`var args = ...page=N` script snippet)."""
    match = re.search('var args =.*page=([0-9]+)', response.text)
    last_page = int(match.group(1))
    return page > last_page
def parse_replies(self, bsn, snB):
replies = []
response = self.session.get('https://forum.gamer.com.tw/ajax/moreCommend.php', params = {'bsn': bsn, 'snB': snB}).json()
response.pop('next_snC')
for reply in response.values():
replies.append(
{
'username' : reply['userid'],
'nickname' : reply['nick'],
'datetime' : reply['wtime'],
'content' : reply['content'], | )
replies.reverse()
return replies
def parse_floor(self, bsn, floor):
    """Extract a single floor (post) from its BeautifulSoup section.

    Deleted/hidden floors only carry a hint message; regular floors
    include author, timestamp, content and their reply thread.
    """
    hint = floor.find('div', {'class': 'hint'})
    if hint is not None:
        # Deleted or hidden floor: only the number and hint remain.
        return {
            'floor' : floor.find('div', {'class': 'floor'}).text,
            'hint' : hint.text,
        }

    # The section id looks like "post_<snB>"; strip the prefix to get
    # the reply-thread identifier.
    snB = floor.get('id').replace('post_', '')
    return {
        'floor' : floor.find('a', {'class': 'floor tippy-gpbp'}).text,
        'username' : floor.find('a', {'class': 'userid'}).text,
        'nickname' : floor.find('a', {'class': 'username'}).text,
        'datetime' : floor.find('a', {'class': 'edittime tippy-post-info'}).get('data-mtime'),
        'content' : floor.find('div', {'class': 'c-article__content'}),
        'replies' : self.parse_replies(bsn, snB),
    }
def export(self, bsn, snA):
page = 0
floors = []
while True:
# Get page
page += 1
response = self.session.get('https://forum.gamer.com.tw/C.php', params = {'bsn': bsn, 'snA': snA, 'page': page})
soup = BeautifulSoup(response.text, 'html.parser')
# Break loop when the page is last
if self.is_last_page(page, response):
return floors
# Get floors
for floor in soup.find_all('section', {'class': 'c-section', 'id': re.compile('.*')}):
floors.append(self.parse_floor(bsn, floor)) | 'comment' : html.escape('{"content":"%s"}' % reply['content']),
} |
error.rs | use std::fmt;
use crate::{FuncIndex, GraphCycles, NodeLabel};
/// Represents an error that may occur while validating a program and
/// estimating its gas usage. The node label type `T` defaults to
/// [`FuncIndex`] and identifies functions in call-cycle errors.
#[derive(Debug, PartialEq, Clone)]
pub enum ProgramError<T = FuncIndex>
where
    T: NodeLabel,
{
    /// Invalid wasm
    InvalidWasm,

    /// Floats not allowed
    FloatsNotAllowed,

    /// Too many function imports
    TooManyFunctionImports,

    /// Function index is too large
    FunctionIndexTooLarge,

    /// `call_indirect` isn't allowed
    CallIndirectNotAllowed,

    /// `loop` isn't allowed
    LoopNotAllowed,

    /// Wasm has no `code` section
    MissingCodeSection,

    /// Recursive calls aren't allowed
    RecursiveCall {
        /// Function containing the recursive-call
        func: FuncIndex,

        /// The `call` instruction offset relative to the beginning of the function
        offset: usize,
    },

    /// Calls cycles (e.g `A -> B -> C -> A`) aren't allowed
    CallCycle(GraphCycles<T>),
}
impl<T> fmt::Display for ProgramError<T>
where
T: NodeLabel,
{
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
<Self as fmt::Debug>::fmt(self, f)
}
}
| fmt |
filter_dvl.py | import argparse
import datetime
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.style.use("./Styles/Scientific.mplstyle")
from typing import Dict, List
import data
import filters
import utilities
import utm
def | (data_config: data.DataConfiguration, \
filter_config: filters.FilterConfiguration):
"""
"""
# Read data.
data = pd.read_csv(data_config.input)
# Extract relevant data for filtering.
time = data["Epoch"].to_numpy()
altitude = data["Altitude"].to_numpy()
# Calculate sampling frequency.
filter_config.sample_frequency = 1 / np.mean(time[1:] - time[0:-1])
# Add end values.
filtered_altitude = filters.add_appendage(altitude, filter_config)
# Filter data and account for time delay.
filtered_altitude, filter_delay = filters.FIR_filter(filtered_altitude, \
filter_config, axis=1)
filtered_time = time - filter_delay
print("\nDVL:")
print(" - Sampling time: {0:.4f}".format( \
1 / filter_config.sample_frequency))
print(" - Sampling frequency: {0:.4f}".format( \
filter_config.sample_frequency))
print(" - Filter time delay: {0:.4f}".format(filter_delay))
# Remove end values.
filtered_altitude = filters.remove_appendage(filtered_altitude, \
filter_config)
filtered_data = pd.DataFrame()
filtered_data["Epoch"] = filtered_time
filtered_data["Altitude"] = filtered_altitude
# Datetime calculations.
times = []
for epoch in filtered_data["Epoch"]:
time = datetime.datetime.fromtimestamp(epoch).strftime( \
data_config.datetime_format)
times.append(time)
filtered_data["Datetime"] = np.array(times, dtype=str)
# Save data.
if data_config.save_output:
filtered_data = pd.DataFrame(filtered_data)
filtered_data.to_csv(data_config.output + "ROV-DVL.csv", sep=',')
def main():
    """Command-line entry point: parse arguments, build configurations,
    and run the DVL lowpass filter."""
    parser = argparse.ArgumentParser(
        description="Filter DVL data with a FIR lowpass filter.")

    # Positional arguments.
    parser.add_argument("input", type=str, help="Input file path.")
    parser.add_argument("output", type=str, help="Output directory path.")
    parser.add_argument("order", type=int, help="Filter order.")
    parser.add_argument("cutoff", type=float, help="Filter cutoff.")
    parser.add_argument("appendage", type=int, help="Filter appendage.")

    # Optional boolean flags (support both --flag and --no-flag forms).
    for flag, message in (("--show_figures", "Show figures."),
                          ("--save_figures", "Save figures."),
                          ("--save_output", "Save output.")):
        parser.add_argument(flag, type=bool, default=False, help=message,
                            action=argparse.BooleanOptionalAction)

    args = parser.parse_args()

    # Data configuration.
    data_config = data.DataConfiguration(args.input, args.output,
        args.show_figures, args.save_figures, args.save_output)

    # Filter configuration.
    filter_config = filters.FilterConfiguration(args.order, args.cutoff,
        args.appendage)

    # Filter data.
    filter_dvl(data_config, filter_config)
main()
| filter_dvl |
server.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app makes it easy to create a kubelet server for various contexts.
package app
import (
"context"
"crypto/tls"
"errors"
"fmt"
"math"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/coreos/go-systemd/daemon"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/healthz"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/certificate"
"k8s.io/client-go/util/connrotation"
"k8s.io/client-go/util/keyutil"
cloudprovider "k8s.io/cloud-provider"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/configz"
"k8s.io/component-base/featuregate"
"k8s.io/component-base/logs"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/version"
"k8s.io/component-base/version/verflag"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet"
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme"
kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/config/validation"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate"
"k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/eviction"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
dynamickubeletconfig "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/server"
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
utilfs "k8s.io/kubernetes/pkg/util/filesystem"
"k8s.io/kubernetes/pkg/util/flock"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/rlimit"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/utils/exec"
utilnet "k8s.io/utils/net"
)
const (
// Kubelet component name
componentKubelet = "kubelet"
)
// NewKubeletCommand creates a *cobra.Command object with default parameters.
// It builds the kubelet's flag set and default configuration up front, and the
// returned command's Run closure performs all flag parsing manually (cobra's
// own parsing is disabled) so that flag-vs-config-file precedence rules can be
// enforced. Fatal setup errors are logged and terminate the process via
// os.Exit(1).
func NewKubeletCommand(ctx context.Context) *cobra.Command {
	// cleanFlagSet holds only the kubelet's own flags; it is kept separate so
	// cobra cannot pollute it with global flags (see SetUsageFunc below).
	cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError)
	cleanFlagSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
	kubeletFlags := options.NewKubeletFlags()
	kubeletConfig, err := options.NewKubeletConfiguration()
	// programmer error: the built-in default configuration should always construct
	if err != nil {
		klog.ErrorS(err, "Failed to create a new kubelet configuration")
		os.Exit(1)
	}
	cmd := &cobra.Command{
		Use: componentKubelet,
		Long: `The kubelet is the primary "node agent" that runs on each
node. It can register the node with the apiserver using one of: the hostname; a flag to
override the hostname; or specific logic for a cloud provider.
The kubelet works in terms of a PodSpec. A PodSpec is a YAML or JSON object
that describes a pod. The kubelet takes a set of PodSpecs that are provided through
various mechanisms (primarily through the apiserver) and ensures that the containers
described in those PodSpecs are running and healthy. The kubelet doesn't manage
containers which were not created by Kubernetes.
Other than from an PodSpec from the apiserver, there are three ways that a container
manifest can be provided to the Kubelet.
File: Path passed as a flag on the command line. Files under this path will be monitored
periodically for updates. The monitoring period is 20s by default and is configurable
via a flag.
HTTP endpoint: HTTP endpoint passed as a parameter on the command line. This endpoint
is checked every 20 seconds (also configurable with a flag).
HTTP server: The kubelet can also listen for HTTP and respond to a simple API
(underspec'd currently) to submit a new manifest.`,
		// The Kubelet has special flag parsing requirements to enforce flag precedence rules,
		// so we do all our parsing manually in Run, below.
		// DisableFlagParsing=true provides the full set of flags passed to the kubelet in the
		// `args` arg to Run, without Cobra's interference.
		DisableFlagParsing: true,
		Run: func(cmd *cobra.Command, args []string) {
			// initial flag parse, since we disable cobra's flag parsing
			if err := cleanFlagSet.Parse(args); err != nil {
				klog.ErrorS(err, "Failed to parse kubelet flag")
				cmd.Usage()
				os.Exit(1)
			}
			// check if there are non-flag arguments in the command line
			cmds := cleanFlagSet.Args()
			if len(cmds) > 0 {
				klog.ErrorS(nil, "Unknown command", "command", cmds[0])
				cmd.Usage()
				os.Exit(1)
			}
			// short-circuit on help
			help, err := cleanFlagSet.GetBool("help")
			if err != nil {
				klog.InfoS(`"help" flag is non-bool, programmer error, please correct`)
				os.Exit(1)
			}
			if help {
				cmd.Help()
				return
			}
			// short-circuit on verflag (--version)
			verflag.PrintAndExitIfRequested()
			cliflag.PrintFlags(cleanFlagSet)
			// set feature gates from initial flags-based config
			if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil {
				klog.ErrorS(err, "Failed to set feature gates from initial flags-based config")
				os.Exit(1)
			}
			// validate the initial KubeletFlags
			if err := options.ValidateKubeletFlags(kubeletFlags); err != nil {
				klog.ErrorS(err, "Failed to validate kubelet flags")
				os.Exit(1)
			}
			if kubeletFlags.ContainerRuntime == "remote" && cleanFlagSet.Changed("pod-infra-container-image") {
				klog.InfoS("Warning: For remote container runtime, --pod-infra-container-image is ignored in kubelet, which should be set in that remote runtime instead")
			}
			// load kubelet config file, if provided
			if configFile := kubeletFlags.KubeletConfigFile; len(configFile) > 0 {
				kubeletConfig, err = loadConfigFile(configFile)
				if err != nil {
					klog.ErrorS(err, "Failed to load kubelet config file", "path", configFile)
					os.Exit(1)
				}
				// We must enforce flag precedence by re-parsing the command line into the new object.
				// This is necessary to preserve backwards-compatibility across binary upgrades.
				// See issue #56171 for more details.
				if err := kubeletConfigFlagPrecedence(kubeletConfig, args); err != nil {
					klog.ErrorS(err, "Failed to precedence kubeletConfigFlag")
					os.Exit(1)
				}
				// update feature gates based on new config
				if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil {
					klog.ErrorS(err, "Failed to set feature gates from initial flags-based config")
					os.Exit(1)
				}
			}
			// We always validate the local configuration (command line + config file).
			// This is the default "last-known-good" config for dynamic config, and must always remain valid.
			if err := kubeletconfigvalidation.ValidateKubeletConfiguration(kubeletConfig); err != nil {
				klog.ErrorS(err, "Failed to validate kubelet configuration", "path", kubeletConfig)
				os.Exit(1)
			}
			// Warn (but do not abort) when KubeletCgroups is not nested under KubeReservedCgroup.
			if (kubeletConfig.KubeletCgroups != "" && kubeletConfig.KubeReservedCgroup != "") && (0 != strings.Index(kubeletConfig.KubeletCgroups, kubeletConfig.KubeReservedCgroup)) {
				klog.InfoS("unsupported configuration:KubeletCgroups is not within KubeReservedCgroup")
			}
			// use dynamic kubelet config, if enabled
			var kubeletConfigController *dynamickubeletconfig.Controller
			if dynamicConfigDir := kubeletFlags.DynamicConfigDir.Value(); len(dynamicConfigDir) > 0 {
				var dynamicKubeletConfig *kubeletconfiginternal.KubeletConfiguration
				dynamicKubeletConfig, kubeletConfigController, err = BootstrapKubeletConfigController(dynamicConfigDir,
					func(kc *kubeletconfiginternal.KubeletConfiguration) error {
						// Here, we enforce flag precedence inside the controller, prior to the controller's validation sequence,
						// so that we get a complete validation at the same point where we can decide to reject dynamic config.
						// This fixes the flag-precedence component of issue #63305.
						// See issue #56171 for general details on flag precedence.
						return kubeletConfigFlagPrecedence(kc, args)
					})
				if err != nil {
					klog.ErrorS(err, "Failed to bootstrap a configuration controller", "dynamicConfigDir", dynamicConfigDir)
					os.Exit(1)
				}
				// If we should just use our existing, local config, the controller will return a nil config
				if dynamicKubeletConfig != nil {
					kubeletConfig = dynamicKubeletConfig
					// Note: flag precedence was already enforced in the controller, prior to validation,
					// by our above transform function. Now we simply update feature gates from the new config.
					if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil {
						klog.ErrorS(err, "Failed to set feature gates from initial flags-based config")
						os.Exit(1)
					}
				}
			}
			// construct a KubeletServer from kubeletFlags and kubeletConfig
			kubeletServer := &options.KubeletServer{
				KubeletFlags:         *kubeletFlags,
				KubeletConfiguration: *kubeletConfig,
			}
			// use kubeletServer to construct the default KubeletDeps
			kubeletDeps, err := UnsecuredDependencies(kubeletServer, utilfeature.DefaultFeatureGate)
			if err != nil {
				klog.ErrorS(err, "Failed to construct kubelet dependencies")
				os.Exit(1)
			}
			// add the kubelet config controller to kubeletDeps
			kubeletDeps.KubeletConfigController = kubeletConfigController
			// warn-only: insufficient permissions degrade but do not prevent startup
			if err := checkPermissions(); err != nil {
				klog.ErrorS(err, "kubelet running with insufficient permissions")
			}
			// make the kubelet's config safe for logging by masking header values
			config := kubeletServer.KubeletConfiguration.DeepCopy()
			for k := range config.StaticPodURLHeader {
				config.StaticPodURLHeader[k] = []string{"<masked>"}
			}
			// log the kubelet's config for inspection
			klog.V(5).InfoS("KubeletConfiguration", "configuration", kubeletServer.KubeletConfiguration)
			// run the kubelet
			if err := Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate); err != nil {
				klog.ErrorS(err, "Failed to run kubelet")
				os.Exit(1)
			}
		},
	}
	// keep cleanFlagSet separate, so Cobra doesn't pollute it with the global flags
	kubeletFlags.AddFlags(cleanFlagSet)
	options.AddKubeletConfigFlags(cleanFlagSet, kubeletConfig)
	options.AddGlobalFlags(cleanFlagSet)
	cleanFlagSet.BoolP("help", "h", false, fmt.Sprintf("help for %s", cmd.Name()))
	// ugly, but necessary, because Cobra's default UsageFunc and HelpFunc pollute the flagset with global flags
	const usageFmt = "Usage:\n  %s\n\nFlags:\n%s"
	cmd.SetUsageFunc(func(cmd *cobra.Command) error {
		fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine(), cleanFlagSet.FlagUsagesWrapped(2))
		return nil
	})
	cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
		fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine(), cleanFlagSet.FlagUsagesWrapped(2))
	})
	return cmd
}
// newFlagSetWithGlobals constructs a new pflag.FlagSet that carries all of the
// globally registered flags.
func newFlagSetWithGlobals() *pflag.FlagSet {
	globalSet := pflag.NewFlagSet("", pflag.ExitOnError)
	// Normalize flag names the same way k8s.io/component-base/cli//flags.go:InitFlags does.
	globalSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
	// Explicitly pull in flags from libraries that register themselves globally.
	options.AddGlobalFlags(globalSet)
	return globalSet
}
// newFakeFlagSet constructs a pflag.FlagSet mirroring every flag in fs, except
// that each value has a no-op Set implementation, so parsing against the fake
// set has no observable effect.
func newFakeFlagSet(fs *pflag.FlagSet) *pflag.FlagSet {
	fake := pflag.NewFlagSet("", pflag.ExitOnError)
	fake.SetNormalizeFunc(fs.GetNormalizeFunc())
	fs.VisitAll(func(original *pflag.Flag) {
		fake.VarP(cliflag.NoOp{}, original.Name, original.Shorthand, original.Usage)
	})
	return fake
}
// kubeletConfigFlagPrecedence re-parses the command line over kc so that flags
// the user actually passed take precedence over values loaded from the config
// file. This preserves backwards-compatibility across binary upgrades; see
// issue #56171 for details.
func kubeletConfigFlagPrecedence(kc *kubeletconfiginternal.KubeletConfiguration, args []string) error {
	// Parse against a throwaway KubeletFlags and a fake global flag set so that
	// Set implementations that accumulate values are not double-applied.
	fs := newFakeFlagSet(newFlagSetWithGlobals())
	// Register throwaway KubeletFlags.
	options.NewKubeletFlags().AddFlags(fs)
	// Register the target KubeletConfiguration so its flags write into kc.
	options.AddKubeletConfigFlags(fs, kc)
	// Snapshot the file-provided feature gates so gates not overridden on the
	// command line can be merged back afterwards.
	fileGates := kc.FeatureGates
	if err := fs.Parse(args); err != nil {
		return err
	}
	// Restore any gate that was set in the config file but not by a flag.
	for name, enabled := range fileGates {
		if _, overridden := kc.FeatureGates[name]; !overridden {
			kc.FeatureGates[name] = enabled
		}
	}
	return nil
}
// loadConfigFile reads and decodes the kubelet config file at name (resolved
// to an absolute path against the current working directory) and returns the
// resulting KubeletConfiguration. Every failure is wrapped with the offending
// file name.
func loadConfigFile(name string) (*kubeletconfiginternal.KubeletConfiguration, error) {
	const errFmt = "failed to load Kubelet config file %s, error %v"
	// compute absolute path based on current working dir
	kubeletConfigFile, err := filepath.Abs(name)
	if err != nil {
		return nil, fmt.Errorf(errFmt, name, err)
	}
	loader, err := configfiles.NewFsLoader(utilfs.DefaultFs{}, kubeletConfigFile)
	if err != nil {
		return nil, fmt.Errorf(errFmt, name, err)
	}
	kc, err := loader.Load()
	if err != nil {
		return nil, fmt.Errorf(errFmt, name, err)
	}
	// err is guaranteed nil at this point; return an explicit nil for clarity.
	return kc, nil
}
// UnsecuredDependencies returns a Dependencies suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization.
// Fields that would require background processes or API clients (CAdvisorInterface, Cloud, KubeClient, ...)
// are deliberately left nil for the caller to fill in.
func UnsecuredDependencies(s *options.KubeletServer, featureGate featuregate.FeatureGate) (*kubelet.Dependencies, error) {
	// Initialize the TLS Options
	tlsOptions, err := InitializeTLS(&s.KubeletFlags, &s.KubeletConfiguration)
	if err != nil {
		return nil, err
	}
	mounter := mount.New(s.ExperimentalMounterPath)
	subpather := subpath.New(mounter)
	hu := hostutil.NewHostUtil()
	var pluginRunner = exec.New()
	// Docker-specific options are only populated when the docker runtime is selected.
	var dockerOptions *kubelet.DockerOptions
	if s.ContainerRuntime == kubetypes.DockerContainerRuntime {
		dockerOptions = &kubelet.DockerOptions{
			DockerEndpoint:            s.DockerEndpoint,
			RuntimeRequestTimeout:     s.RuntimeRequestTimeout.Duration,
			ImagePullProgressDeadline: s.ImagePullProgressDeadline.Duration,
		}
	}
	plugins, err := ProbeVolumePlugins(featureGate)
	if err != nil {
		return nil, err
	}
	return &kubelet.Dependencies{
		Auth:                nil, // default does not enforce auth[nz]
		CAdvisorInterface:   nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
		Cloud:               nil, // cloud provider might start background processes
		ContainerManager:    nil,
		DockerOptions:       dockerOptions,
		KubeClient:          nil,
		HeartbeatClient:     nil,
		EventClient:         nil,
		HostUtil:            hu,
		Mounter:             mounter,
		Subpather:           subpather,
		OOMAdjuster:         oom.NewOOMAdjuster(),
		OSInterface:         kubecontainer.RealOS{},
		VolumePlugins:       plugins,
		DynamicPluginProber: GetDynamicPluginProber(s.VolumePluginDir, pluginRunner),
		TLSOptions:          tlsOptions}, nil
}
// Run runs the specified KubeletServer with the given Dependencies. This should never exit.
// The kubeDeps argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the Dependencies object and a default one will
// not be generated.
func Run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Dependencies, featureGate featuregate.FeatureGate) error {
	// Configure logging (format and sanitization) from the server settings
	// before anything else writes to the log.
	logOpts := logs.NewOptions()
	logOpts.LogFormat = s.Logging.Format
	logOpts.LogSanitization = s.Logging.Sanitization
	logOpts.Apply()
	// To help debugging, immediately log version
	klog.InfoS("Kubelet version", "kubeletVersion", version.Get())
	if err := initForOS(s.KubeletFlags.WindowsService, s.KubeletFlags.WindowsPriorityClass); err != nil {
		return fmt.Errorf("failed OS init: %v", err)
	}
	if err := run(ctx, s, kubeDeps, featureGate); err != nil {
		return fmt.Errorf("failed to run Kubelet: %v", err)
	}
	return nil
}
// setConfigz records kc on the given configz section after converting it to
// the external (v1beta1) representation served by the /configz endpoint.
func setConfigz(cz *configz.Config, kc *kubeletconfiginternal.KubeletConfiguration) error {
	scheme, _, err := kubeletscheme.NewSchemeAndCodecs()
	if err != nil {
		return err
	}
	// Convert the internal configuration to the versioned API type.
	var external kubeletconfigv1beta1.KubeletConfiguration
	if err := scheme.Convert(kc, &external, nil); err != nil {
		return err
	}
	cz.Set(external)
	return nil
}
// initConfigz registers the kubelet's configuration under the "kubeletconfig"
// key of the /configz endpoint, logging and returning any failure.
func initConfigz(kc *kubeletconfiginternal.KubeletConfiguration) error {
	cz, err := configz.New("kubeletconfig")
	if err != nil {
		klog.ErrorS(err, "Failed to register configz")
		return err
	}
	if err = setConfigz(cz, kc); err != nil {
		klog.ErrorS(err, "Failed to register config")
		return err
	}
	return nil
}
// makeEventRecorder sets up kubeDeps.Recorder if it's nil. It's a no-op otherwise.
func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) {
	if kubeDeps.Recorder != nil {
		// The caller already supplied a recorder; leave it untouched.
		return
	}
	broadcaster := record.NewBroadcaster()
	source := v1.EventSource{Component: componentKubelet, Host: string(nodeName)}
	kubeDeps.Recorder = broadcaster.NewRecorder(legacyscheme.Scheme, source)
	broadcaster.StartStructuredLogging(3)
	if kubeDeps.EventClient == nil {
		klog.InfoS("No api server defined - no events will be sent to API server")
		return
	}
	klog.V(4).InfoS("Sending events to api server")
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeDeps.EventClient.Events("")})
}
// run performs the bulk of kubelet startup: it sets global feature gates,
// validates the server configuration, acquires the lock file, constructs any
// missing dependencies (cloud provider, API clients, cadvisor, container
// manager), starts the kubelet proper via RunKubelet, and then blocks until
// the context is cancelled or lock-file contention is observed.
func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Dependencies, featureGate featuregate.FeatureGate) (err error) {
	// Set global feature gates based on the value on the initial KubeletServer
	err = utilfeature.DefaultMutableFeatureGate.SetFromMap(s.KubeletConfiguration.FeatureGates)
	if err != nil {
		return err
	}
	// validate the initial KubeletServer (we set feature gates first, because this validation depends on feature gates)
	if err := options.ValidateKubeletServer(s); err != nil {
		return err
	}
	// Obtain Kubelet Lock File
	if s.ExitOnLockContention && s.LockFilePath == "" {
		return errors.New("cannot exit on lock file contention: no lock file specified")
	}
	// done is closed by the lockfile watcher on contention; the final select
	// below waits on it.
	done := make(chan struct{})
	if s.LockFilePath != "" {
		klog.InfoS("Acquiring file lock", "path", s.LockFilePath)
		if err := flock.Acquire(s.LockFilePath); err != nil {
			return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err)
		}
		if s.ExitOnLockContention {
			klog.InfoS("Watching for inotify events", "path", s.LockFilePath)
			if err := watchForLockfileContention(s.LockFilePath, done); err != nil {
				return err
			}
		}
	}
	// Register current configuration with /configz endpoint.
	// Failure here is logged but deliberately non-fatal.
	err = initConfigz(&s.KubeletConfiguration)
	if err != nil {
		klog.ErrorS(err, "Failed to register kubelet configuration with configz")
	}
	if len(s.ShowHiddenMetricsForVersion) > 0 {
		metrics.SetShowHidden()
	}
	// About to get clients and such, detect standaloneMode
	standaloneMode := true
	if len(s.KubeConfig) > 0 {
		standaloneMode = false
	}
	if kubeDeps == nil {
		kubeDeps, err = UnsecuredDependencies(s, featureGate)
		if err != nil {
			return err
		}
	}
	if kubeDeps.Cloud == nil {
		if !cloudprovider.IsExternal(s.CloudProvider) {
			cloudprovider.DeprecationWarningForProvider(s.CloudProvider)
			cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
			if err != nil {
				return err
			}
			if cloud != nil {
				klog.V(2).InfoS("Successfully initialized cloud provider", "cloudProvider", s.CloudProvider, "cloudConfigFile", s.CloudConfigFile)
			}
			kubeDeps.Cloud = cloud
		}
	}
	hostName, err := nodeutil.GetHostname(s.HostnameOverride)
	if err != nil {
		return err
	}
	nodeName, err := getNodeName(kubeDeps.Cloud, hostName)
	if err != nil {
		return err
	}
	// if in standalone mode, indicate as much by setting all clients to nil
	switch {
	case standaloneMode:
		kubeDeps.KubeClient = nil
		kubeDeps.EventClient = nil
		kubeDeps.HeartbeatClient = nil
		klog.InfoS("Standalone mode, no API client")
	case kubeDeps.KubeClient == nil, kubeDeps.EventClient == nil, kubeDeps.HeartbeatClient == nil:
		clientConfig, closeAllConns, err := buildKubeletClientConfig(ctx, s, nodeName)
		if err != nil {
			return err
		}
		if closeAllConns == nil {
			return errors.New("closeAllConns must be a valid function other than nil")
		}
		kubeDeps.OnHeartbeatFailure = closeAllConns
		kubeDeps.KubeClient, err = clientset.NewForConfig(clientConfig)
		if err != nil {
			return fmt.Errorf("failed to initialize kubelet client: %v", err)
		}
		// make a separate client for events, with its own QPS/burst limits
		eventClientConfig := *clientConfig
		eventClientConfig.QPS = float32(s.EventRecordQPS)
		eventClientConfig.Burst = int(s.EventBurst)
		kubeDeps.EventClient, err = v1core.NewForConfig(&eventClientConfig)
		if err != nil {
			return fmt.Errorf("failed to initialize kubelet event client: %v", err)
		}
		// make a separate client for heartbeat with throttling disabled and a timeout attached
		heartbeatClientConfig := *clientConfig
		heartbeatClientConfig.Timeout = s.KubeletConfiguration.NodeStatusUpdateFrequency.Duration
		// The timeout is the minimum of the lease duration and status update frequency
		leaseTimeout := time.Duration(s.KubeletConfiguration.NodeLeaseDurationSeconds) * time.Second
		if heartbeatClientConfig.Timeout > leaseTimeout {
			heartbeatClientConfig.Timeout = leaseTimeout
		}
		// QPS of -1 disables client-side rate limiting for heartbeats.
		heartbeatClientConfig.QPS = float32(-1)
		kubeDeps.HeartbeatClient, err = clientset.NewForConfig(&heartbeatClientConfig)
		if err != nil {
			return fmt.Errorf("failed to initialize kubelet heartbeat client: %v", err)
		}
	}
	if kubeDeps.Auth == nil {
		auth, runAuthenticatorCAReload, err := BuildAuth(nodeName, kubeDeps.KubeClient, s.KubeletConfiguration)
		if err != nil {
			return err
		}
		kubeDeps.Auth = auth
		runAuthenticatorCAReload(ctx.Done())
	}
	// Build the list of cgroup roots cadvisor should watch.
	var cgroupRoots []string
	if s.CgroupDriver == "none" {
		cgroupRoots = []string{"/"}
	} else {
		nodeAllocatableRoot := cm.NodeAllocatableRoot(s.CgroupRoot, s.CgroupsPerQOS, s.CgroupDriver)
		cgroupRoots = append(cgroupRoots, nodeAllocatableRoot)
		kubeletCgroup, err := cm.GetKubeletContainer(s.KubeletCgroups)
		if err != nil {
			klog.Warningf("failed to get the kubelet's cgroup: %v. Kubelet system container metrics may be missing.", err)
		} else if kubeletCgroup != "" {
			cgroupRoots = append(cgroupRoots, kubeletCgroup)
		}
		runtimeCgroup, err := cm.GetRuntimeContainer(s.ContainerRuntime, s.RuntimeCgroups)
		if err != nil {
			klog.Warningf("failed to get the container runtime's cgroup: %v. Runtime system container metrics may be missing.", err)
		} else if runtimeCgroup != "" {
			// RuntimeCgroups is optional, so ignore if it isn't specified
			cgroupRoots = append(cgroupRoots, runtimeCgroup)
		}
		if s.SystemCgroups != "" {
			// SystemCgroups is optional, so ignore if it isn't specified
			cgroupRoots = append(cgroupRoots, s.SystemCgroups)
		}
	}
	if kubeDeps.CAdvisorInterface == nil {
		imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.ContainerRuntime, s.RemoteRuntimeEndpoint)
		kubeDeps.CAdvisorInterface, err = cadvisor.New(imageFsInfoProvider, s.RootDirectory, cgroupRoots, cadvisor.UsingLegacyCadvisorStats(s.ContainerRuntime, s.RemoteRuntimeEndpoint))
		if err != nil {
			return err
		}
	}
	// Setup event recorder if required.
	makeEventRecorder(kubeDeps, nodeName)
	if kubeDeps.ContainerManager == nil {
		if s.CgroupsPerQOS && s.CgroupRoot == "" {
			klog.InfoS("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /")
			s.CgroupRoot = "/"
		}
		var reservedSystemCPUs cpuset.CPUSet
		if s.ReservedSystemCPUs != "" {
			// is it safe do use CAdvisor here ??
			machineInfo, err := kubeDeps.CAdvisorInterface.MachineInfo()
			if err != nil {
				// if can't use CAdvisor here, fall back to non-explicit cpu list behavor
				klog.InfoS("Failed to get MachineInfo, set reservedSystemCPUs to empty")
				reservedSystemCPUs = cpuset.NewCPUSet()
			} else {
				var errParse error
				reservedSystemCPUs, errParse = cpuset.Parse(s.ReservedSystemCPUs)
				if errParse != nil {
					// invalid cpu list is provided, set reservedSystemCPUs to empty, so it won't overwrite kubeReserved/systemReserved
					klog.InfoS("Invalid ReservedSystemCPUs", "systemReservedCPUs", s.ReservedSystemCPUs)
					return errParse
				}
				// reservedList is sorted ascending, so first/last bound the set.
				reservedList := reservedSystemCPUs.ToSlice()
				first := reservedList[0]
				last := reservedList[len(reservedList)-1]
				if first < 0 || last >= machineInfo.NumCores {
					// the specified cpuset is outside of the range of what the machine has
					klog.InfoS("Invalid cpuset specified by --reserved-cpus")
					return fmt.Errorf("Invalid cpuset %q specified by --reserved-cpus", s.ReservedSystemCPUs)
				}
			}
		} else {
			reservedSystemCPUs = cpuset.NewCPUSet()
		}
		if reservedSystemCPUs.Size() > 0 {
			// at cmd option valication phase it is tested either --system-reserved-cgroup or --kube-reserved-cgroup is specified, so overwrite should be ok
			klog.InfoS("Option --reserved-cpus is specified, it will overwrite the cpu setting in KubeReserved and SystemReserved", "kubeReservedCPUs", s.KubeReserved, "systemReservedCPUs", s.SystemReserved)
			if s.KubeReserved != nil {
				delete(s.KubeReserved, "cpu")
			}
			if s.SystemReserved == nil {
				s.SystemReserved = make(map[string]string)
			}
			s.SystemReserved["cpu"] = strconv.Itoa(reservedSystemCPUs.Size())
			klog.InfoS("After cpu setting is overwritten", "kubeReservedCPUs", s.KubeReserved, "systemReservedCPUs", s.SystemReserved)
		}
		kubeReserved, err := parseResourceList(s.KubeReserved)
		if err != nil {
			return err
		}
		systemReserved, err := parseResourceList(s.SystemReserved)
		if err != nil {
			return err
		}
		var hardEvictionThresholds []evictionapi.Threshold
		// If the user requested to ignore eviction thresholds, then do not set valid values for hardEvictionThresholds here.
		if !s.ExperimentalNodeAllocatableIgnoreEvictionThreshold {
			hardEvictionThresholds, err = eviction.ParseThresholdConfig([]string{}, s.EvictionHard, nil, nil, nil)
			if err != nil {
				return err
			}
		}
		experimentalQOSReserved, err := cm.ParseQOSReserved(s.QOSReserved)
		if err != nil {
			return err
		}
		devicePluginEnabled := utilfeature.DefaultFeatureGate.Enabled(features.DevicePlugins)
		kubeDeps.ContainerManager, err = cm.NewContainerManager(
			kubeDeps.Mounter,
			kubeDeps.CAdvisorInterface,
			cm.NodeConfig{
				RuntimeCgroupsName:    s.RuntimeCgroups,
				SystemCgroupsName:     s.SystemCgroups,
				KubeletCgroupsName:    s.KubeletCgroups,
				ContainerRuntime:      s.ContainerRuntime,
				CgroupsPerQOS:         s.CgroupsPerQOS,
				CgroupRoot:            s.CgroupRoot,
				CgroupDriver:          s.CgroupDriver,
				KubeletRootDir:        s.RootDirectory,
				ProtectKernelDefaults: s.ProtectKernelDefaults,
				NodeAllocatableConfig: cm.NodeAllocatableConfig{
					KubeReservedCgroupName:   s.KubeReservedCgroup,
					SystemReservedCgroupName: s.SystemReservedCgroup,
					EnforceNodeAllocatable:   sets.NewString(s.EnforceNodeAllocatable...),
					KubeReserved:             kubeReserved,
					SystemReserved:           systemReserved,
					ReservedSystemCPUs:       reservedSystemCPUs,
					HardEvictionThresholds:   hardEvictionThresholds,
				},
				QOSReserved:                             *experimentalQOSReserved,
				ExperimentalCPUManagerPolicy:            s.CPUManagerPolicy,
				ExperimentalCPUManagerReconcilePeriod:   s.CPUManagerReconcilePeriod.Duration,
				ExperimentalMemoryManagerPolicy:         s.MemoryManagerPolicy,
				ExperimentalMemoryManagerReservedMemory: s.ReservedMemory,
				ExperimentalPodPidsLimit:                s.PodPidsLimit,
				EnforceCPULimits:                        s.CPUCFSQuota,
				CPUCFSQuotaPeriod:                       s.CPUCFSQuotaPeriod.Duration,
				ExperimentalTopologyManagerPolicy:       s.TopologyManagerPolicy,
				ExperimentalTopologyManagerScope:        s.TopologyManagerScope,
				Rootless:                                s.Rootless,
			},
			s.FailSwapOn,
			devicePluginEnabled,
			kubeDeps.Recorder)
		if err != nil {
			return err
		}
	}
	utilruntime.ReallyCrash = s.ReallyCrashForTesting
	// TODO(vmarmol): Do this through container config.
	oomAdjuster := kubeDeps.OOMAdjuster
	if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil {
		klog.InfoS("Failed to ApplyOOMScoreAdj", "err", err)
	}
	err = kubelet.PreInitRuntimeService(&s.KubeletConfiguration,
		kubeDeps, &s.ContainerRuntimeOptions,
		s.ContainerRuntime,
		s.RuntimeCgroups,
		s.RemoteRuntimeEndpoint,
		s.RemoteImageEndpoint,
		s.NonMasqueradeCIDR)
	if err != nil {
		return err
	}
	if err := RunKubelet(s, kubeDeps, s.RunOnce); err != nil {
		return err
	}
	// If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops
	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 &&
		kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce {
		if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil {
			return err
		}
	}
	if s.HealthzPort > 0 {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		// Retry the healthz server forever; failures are logged, not fatal.
		go wait.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), mux)
			if err != nil {
				klog.ErrorS(err, "Failed to start healthz server")
			}
		}, 5*time.Second, wait.NeverStop)
	}
	if s.RunOnce {
		return nil
	}
	// If systemd is used, notify it that we have started
	go daemon.SdNotify(false, "READY=1")
	// Block until lockfile contention (done) or the context is cancelled.
	select {
	case <-done:
		break
	case <-ctx.Done():
		break
	}
	return nil
}
// buildKubeletClientConfig constructs the appropriate client config for the kubelet depending on whether
// bootstrapping is enabled or client certificate rotation is enabled. It returns the client config, a
// function that forcefully closes all active connections (used on heartbeat failure), and any error.
func buildKubeletClientConfig(ctx context.Context, s *options.KubeletServer, nodeName types.NodeName) (*restclient.Config, func(), error) {
	if s.RotateCertificates {
		// Rules for client rotation and the handling of kube config files:
		//
		// 1. If the client provides only a kubeconfig file, we must use that as the initial client
		//    kubeadm needs the initial data in the kubeconfig to be placed into the cert store
		// 2. If the client provides only an initial bootstrap kubeconfig file, we must create a
		//    kubeconfig file at the target location that points to the cert store, but until
		//    the file is present the client config will have no certs
		// 3. If the client provides both and the kubeconfig is valid, we must ignore the bootstrap
		//    kubeconfig.
		// 4. If the client provides both and the kubeconfig is expired or otherwise invalid, we must
		//    replace the kubeconfig with a new file that points to the cert dir
		//
		// The desired configuration for bootstrapping is to use a bootstrap kubeconfig and to have
		// the kubeconfig file be managed by this process. For backwards compatibility with kubeadm,
		// which provides a high powered kubeconfig on the master with cert/key data, we must
		// bootstrap the cert manager with the contents of the initial client config.
		klog.InfoS("Client rotation is on, will bootstrap in background")
		certConfig, clientConfig, err := bootstrap.LoadClientConfig(s.KubeConfig, s.BootstrapKubeconfig, s.CertDirectory)
		if err != nil {
			return nil, nil, err
		}
		// use the correct content type for cert rotation, but don't set QPS
		setContentTypeForClient(certConfig, s.ContentType)
		kubeClientConfigOverrides(s, clientConfig)
		clientCertificateManager, err := buildClientCertificateManager(certConfig, clientConfig, s.CertDirectory, nodeName)
		if err != nil {
			return nil, nil, err
		}
		// Expose the client certificate's remaining TTL as a gauge for monitoring.
		legacyregistry.RawMustRegister(metrics.NewGaugeFunc(
			metrics.GaugeOpts{
				Subsystem: kubeletmetrics.KubeletSubsystem,
				Name:      "certificate_manager_client_ttl_seconds",
				Help: "Gauge of the TTL (time-to-live) of the Kubelet's client certificate. " +
					"The value is in seconds until certificate expiry (negative if already expired). " +
					"If client certificate is invalid or unused, the value will be +INF.",
				StabilityLevel: metrics.ALPHA,
			},
			func() float64 {
				if c := clientCertificateManager.Current(); c != nil && c.Leaf != nil {
					return math.Trunc(c.Leaf.NotAfter.Sub(time.Now()).Seconds())
				}
				return math.Inf(1)
			},
		))
		// the rotating transport will use the cert from the cert manager instead of these files
		transportConfig := restclient.AnonymousClientConfig(clientConfig)
		// we set exitAfter to five minutes because we use this client configuration to request new certs - if we are unable
		// to request new certs, we will be unable to continue normal operation. Exiting the process allows a wrapper
		// or the bootstrapping credentials to potentially lay down new initial config.
		closeAllConns, err := kubeletcertificate.UpdateTransport(wait.NeverStop, transportConfig, clientCertificateManager, 5*time.Minute)
		if err != nil {
			return nil, nil, err
		}
		klog.V(2).InfoS("Starting client certificate rotation")
		clientCertificateManager.Start()
		return transportConfig, closeAllConns, nil
	}
	// No rotation: optionally bootstrap a client cert once, then load kubeconfig directly.
	if len(s.BootstrapKubeconfig) > 0 {
		if err := bootstrap.LoadClientCert(ctx, s.KubeConfig, s.BootstrapKubeconfig, s.CertDirectory, nodeName); err != nil {
			return nil, nil, err
		}
	}
	clientConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.KubeConfig},
		&clientcmd.ConfigOverrides{},
	).ClientConfig()
	if err != nil {
		return nil, nil, fmt.Errorf("invalid kubeconfig: %v", err)
	}
	kubeClientConfigOverrides(s, clientConfig)
	// Instrument the config with a rotatable dialer so connections can be force-closed.
	closeAllConns, err := updateDialer(clientConfig)
	if err != nil {
		return nil, nil, err
	}
	return clientConfig, closeAllConns, nil
}
// updateDialer instruments a restconfig with a dial. the returned function allows forcefully closing all active connections.
func | (clientConfig *restclient.Config) (func(), error) {
if clientConfig.Transport != nil || clientConfig.Dial != nil {
return nil, fmt.Errorf("there is already a transport or dialer configured")
}
d := connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext)
clientConfig.Dial = d.DialContext
return d.CloseAll, nil
}
// buildClientCertificateManager creates a certificate manager that will use certConfig to request a client certificate
// if no certificate is available, or the most recent clientConfig (which is assumed to point to the cert that the manager will
// write out).
func buildClientCertificateManager(certConfig, clientConfig *restclient.Config, certDir string, nodeName types.NodeName) (certificate.Manager, error) {
	// newClientsetFn picks which config to use for CSR requests based on
	// whether a current certificate already exists.
	newClientsetFn := func(current *tls.Certificate) (clientset.Interface, error) {
		// If we have a valid certificate, use that to fetch CSRs. Otherwise use the bootstrap
		// credentials. In the future it would be desirable to change the behavior of bootstrap
		// to always fall back to the external bootstrap credentials when such credentials are
		// provided by a fundamental trust system like cloud VM identity or an HSM module.
		config := certConfig
		if current != nil {
			config = clientConfig
		}
		return clientset.NewForConfig(config)
	}
	return kubeletcertificate.NewKubeletClientCertificateManager(
		certDir,
		nodeName,
		// this preserves backwards compatibility with kubeadm which passes
		// a high powered certificate to the kubelet as --kubeconfig and expects
		// it to be rotated out immediately
		clientConfig.CertData,
		clientConfig.KeyData,
		clientConfig.CertFile,
		clientConfig.KeyFile,
		newClientsetFn,
	)
}
// kubeClientConfigOverrides applies kubelet flag settings (content type and
// API QPS/burst) on top of whatever the kubeconfig specified.
func kubeClientConfigOverrides(s *options.KubeletServer, clientConfig *restclient.Config) {
	setContentTypeForClient(clientConfig, s.ContentType)
	// Flags win over kubeconfig values for client-side rate limiting.
	clientConfig.QPS = float32(s.KubeAPIQPS)
	clientConfig.Burst = int(s.KubeAPIBurst)
}
// getNodeName returns the node name according to the cloud provider
// if cloud provider is specified. Otherwise, returns the hostname of the node.
func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName, error) {
	// Without a cloud provider the hostname is authoritative.
	if cloud == nil {
		return types.NodeName(hostname), nil
	}
	instances, ok := cloud.Instances()
	if !ok {
		return "", fmt.Errorf("failed to get instances from cloud provider")
	}
	name, err := instances.CurrentNodeName(context.TODO(), hostname)
	if err != nil {
		return "", fmt.Errorf("error fetching current node name from cloud provider: %v", err)
	}
	klog.V(2).InfoS("Cloud provider determined current node", "nodeName", klog.KRef("", string(name)))
	return name, nil
}
// InitializeTLS checks for a configured TLSCertFile and TLSPrivateKeyFile: if unspecified a new self-signed
// certificate and key file are generated and written under kf.CertDirectory. Returns a configured
// server.TLSOptions object carrying the minimum TLS version, cipher suites, and (when a client CA file
// is configured) client-certificate handling.
func InitializeTLS(kf *options.KubeletFlags, kc *kubeletconfiginternal.KubeletConfiguration) (*server.TLSOptions, error) {
	// Only fall back to on-disk (possibly self-signed) certs when serving-cert
	// bootstrap is disabled and no explicit cert/key pair was configured.
	if !kc.ServerTLSBootstrap && kc.TLSCertFile == "" && kc.TLSPrivateKeyFile == "" {
		kc.TLSCertFile = path.Join(kf.CertDirectory, "kubelet.crt")
		kc.TLSPrivateKeyFile = path.Join(kf.CertDirectory, "kubelet.key")
		canReadCertAndKey, err := certutil.CanReadCertAndKey(kc.TLSCertFile, kc.TLSPrivateKeyFile)
		if err != nil {
			return nil, err
		}
		if !canReadCertAndKey {
			// No readable pair exists yet: generate a self-signed certificate
			// for this node's hostname and persist both halves so subsequent
			// startups can reuse them.
			hostName, err := nodeutil.GetHostname(kf.HostnameOverride)
			if err != nil {
				return nil, err
			}
			cert, key, err := certutil.GenerateSelfSignedCertKey(hostName, nil, nil)
			if err != nil {
				return nil, fmt.Errorf("unable to generate self signed cert: %v", err)
			}
			if err := certutil.WriteCert(kc.TLSCertFile, cert); err != nil {
				return nil, err
			}
			if err := keyutil.WriteKey(kc.TLSPrivateKeyFile, key); err != nil {
				return nil, err
			}
			klog.V(4).InfoS("Using self-signed cert", "TLSCertFile", kc.TLSCertFile, "TLSPrivateKeyFile", kc.TLSPrivateKeyFile)
		}
	}
	tlsCipherSuites, err := cliflag.TLSCipherSuites(kc.TLSCipherSuites)
	if err != nil {
		return nil, err
	}
	// Warn (but do not fail) when the configured cipher list includes suites
	// the flag utility considers insecure.
	if len(tlsCipherSuites) > 0 {
		insecureCiphers := cliflag.InsecureTLSCiphers()
		for i := 0; i < len(tlsCipherSuites); i++ {
			for cipherName, cipherID := range insecureCiphers {
				if tlsCipherSuites[i] == cipherID {
					klog.InfoS("Use of insecure cipher detected.", "cipher", cipherName)
				}
			}
		}
	}
	minTLSVersion, err := cliflag.TLSVersion(kc.TLSMinVersion)
	if err != nil {
		return nil, err
	}
	tlsOptions := &server.TLSOptions{
		Config: &tls.Config{
			MinVersion:   minTLSVersion,
			CipherSuites: tlsCipherSuites,
		},
		CertFile: kc.TLSCertFile,
		KeyFile:  kc.TLSPrivateKeyFile,
	}
	if len(kc.Authentication.X509.ClientCAFile) > 0 {
		clientCAs, err := certutil.NewPool(kc.Authentication.X509.ClientCAFile)
		if err != nil {
			return nil, fmt.Errorf("unable to load client CA file %s: %v", kc.Authentication.X509.ClientCAFile, err)
		}
		// Specify allowed CAs for client certificates
		tlsOptions.Config.ClientCAs = clientCAs
		// Populate PeerCertificates in requests, but don't reject connections without verified certificates
		tlsOptions.Config.ClientAuth = tls.RequestClientCert
	}
	return tlsOptions, nil
}
// setContentTypeForClient stores the appropriate wire content type on cfg and
// derives AcceptContentTypes from it. An empty contentType leaves the config
// untouched so the rest client applies its own defaults.
func setContentTypeForClient(cfg *restclient.Config, contentType string) {
	if contentType == "" {
		return
	}
	cfg.ContentType = contentType
	if contentType == runtime.ContentTypeProtobuf {
		// Protobuf clients still accept JSON responses as a fallback.
		cfg.AcceptContentTypes = strings.Join([]string{runtime.ContentTypeProtobuf, runtime.ContentTypeJSON}, ",")
	}
	// For any other content type the rest client performs its own defaulting.
}
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
// 1 Integration tests
// 2 Kubelet binary
// 3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencies, runOnce bool) error {
	hostname, err := nodeutil.GetHostname(kubeServer.HostnameOverride)
	if err != nil {
		return err
	}
	// Query the cloud provider for our node name, default to hostname if kubeDeps.Cloud == nil
	nodeName, err := getNodeName(kubeDeps.Cloud, hostname)
	if err != nil {
		return err
	}
	hostnameOverridden := len(kubeServer.HostnameOverride) > 0
	// Setup event recorder if required.
	makeEventRecorder(kubeDeps, nodeName)
	// Parse --node-ip: a comma-separated list of IPs. Unparseable entries are
	// logged and skipped rather than treated as fatal.
	var nodeIPs []net.IP
	if kubeServer.NodeIP != "" {
		for _, ip := range strings.Split(kubeServer.NodeIP, ",") {
			parsedNodeIP := net.ParseIP(strings.TrimSpace(ip))
			if parsedNodeIP == nil {
				klog.InfoS("Could not parse --node-ip ignoring", "IP", ip)
			} else {
				nodeIPs = append(nodeIPs, parsedNodeIP)
			}
		}
	}
	// Validate the resulting IP set: more than one IP requires the
	// IPv6DualStack feature gate, at most two IPs of different families are
	// allowed, dual-stack is incompatible with a cloud provider, and neither
	// IP may be the unspecified address.
	if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) && len(nodeIPs) > 1 {
		return fmt.Errorf("dual-stack --node-ip %q not supported in a single-stack cluster", kubeServer.NodeIP)
	} else if len(nodeIPs) > 2 || (len(nodeIPs) == 2 && utilnet.IsIPv6(nodeIPs[0]) == utilnet.IsIPv6(nodeIPs[1])) {
		return fmt.Errorf("bad --node-ip %q; must contain either a single IP or a dual-stack pair of IPs", kubeServer.NodeIP)
	} else if len(nodeIPs) == 2 && kubeServer.CloudProvider != "" {
		return fmt.Errorf("dual-stack --node-ip %q not supported when using a cloud provider", kubeServer.NodeIP)
	} else if len(nodeIPs) == 2 && (nodeIPs[0].IsUnspecified() || nodeIPs[1].IsUnspecified()) {
		return fmt.Errorf("dual-stack --node-ip %q cannot include '0.0.0.0' or '::'", kubeServer.NodeIP)
	}
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: true,
	})
	credentialprovider.SetPreferredDockercfgPath(kubeServer.RootDirectory)
	klog.V(2).InfoS("Using root directory", "path", kubeServer.RootDirectory)
	if kubeDeps.OSInterface == nil {
		kubeDeps.OSInterface = kubecontainer.RealOS{}
	}
	// Construct and initialize the kubelet from the assembled configuration.
	k, err := createAndInitKubelet(&kubeServer.KubeletConfiguration,
		kubeDeps,
		&kubeServer.ContainerRuntimeOptions,
		kubeServer.ContainerRuntime,
		hostname,
		hostnameOverridden,
		nodeName,
		nodeIPs,
		kubeServer.ProviderID,
		kubeServer.CloudProvider,
		kubeServer.CertDirectory,
		kubeServer.RootDirectory,
		kubeServer.ImageCredentialProviderConfigFile,
		kubeServer.ImageCredentialProviderBinDir,
		kubeServer.RegisterNode,
		kubeServer.RegisterWithTaints,
		kubeServer.AllowedUnsafeSysctls,
		kubeServer.ExperimentalMounterPath,
		kubeServer.KernelMemcgNotification,
		kubeServer.ExperimentalCheckNodeCapabilitiesBeforeMount,
		kubeServer.ExperimentalNodeAllocatableIgnoreEvictionThreshold,
		kubeServer.MinimumGCAge,
		kubeServer.MaxPerPodContainerCount,
		kubeServer.MaxContainerCount,
		kubeServer.MasterServiceNamespace,
		kubeServer.RegisterSchedulable,
		kubeServer.KeepTerminatedPodVolumes,
		kubeServer.NodeLabels,
		kubeServer.SeccompProfileRoot,
		kubeServer.NodeStatusMaxImages)
	if err != nil {
		return fmt.Errorf("failed to create kubelet: %v", err)
	}
	// NewMainKubelet should have set up a pod source config if one didn't exist
	// when the builder was run. This is just a precaution.
	if kubeDeps.PodConfig == nil {
		return fmt.Errorf("failed to create kubelet, pod source config was nil")
	}
	podCfg := kubeDeps.PodConfig
	// Raising the open-file limit is best-effort: failure is logged, not fatal.
	if err := rlimit.SetNumFiles(uint64(kubeServer.MaxOpenFiles)); err != nil {
		klog.ErrorS(err, "Failed to set rlimit on max file handles")
	}
	// process pods and exit.
	if runOnce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		klog.InfoS("Started kubelet as runonce")
	} else {
		startKubelet(k, podCfg, &kubeServer.KubeletConfiguration, kubeDeps, kubeServer.EnableServer)
		klog.InfoS("Started kubelet")
	}
	return nil
}
// startKubelet launches the kubelet's long-running goroutines: the main pod
// sync loop and, when configured, the server, the read-only port, and the pod
// resources endpoint (behind the KubeletPodResources feature gate). It returns
// immediately without waiting on any of them.
func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *kubelet.Dependencies, enableServer bool) {
	// start the kubelet
	go k.Run(podCfg.Updates())
	// start the kubelet server
	if enableServer {
		go k.ListenAndServe(kubeCfg, kubeDeps.TLSOptions, kubeDeps.Auth)
	}
	// The read-only port is only served when a non-zero port is configured.
	if kubeCfg.ReadOnlyPort > 0 {
		go k.ListenAndServeReadOnly(net.ParseIP(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort))
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResources) {
		go k.ListenAndServePodResources()
	}
}
// createAndInitKubelet constructs the main kubelet via kubelet.NewMainKubelet,
// forwarding every parameter verbatim, then performs initial lifecycle steps
// (BirthCry and StartGarbageCollection) before returning it as a
// kubelet.Bootstrap.
func createAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
	kubeDeps *kubelet.Dependencies,
	crOptions *config.ContainerRuntimeOptions,
	containerRuntime string,
	hostname string,
	hostnameOverridden bool,
	nodeName types.NodeName,
	nodeIPs []net.IP,
	providerID string,
	cloudProvider string,
	certDirectory string,
	rootDirectory string,
	imageCredentialProviderConfigFile string,
	imageCredentialProviderBinDir string,
	registerNode bool,
	registerWithTaints []api.Taint,
	allowedUnsafeSysctls []string,
	experimentalMounterPath string,
	kernelMemcgNotification bool,
	experimentalCheckNodeCapabilitiesBeforeMount bool,
	experimentalNodeAllocatableIgnoreEvictionThreshold bool,
	minimumGCAge metav1.Duration,
	maxPerPodContainerCount int32,
	maxContainerCount int32,
	masterServiceNamespace string,
	registerSchedulable bool,
	keepTerminatedPodVolumes bool,
	nodeLabels map[string]string,
	seccompProfileRoot string,
	nodeStatusMaxImages int32) (k kubelet.Bootstrap, err error) {
	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations
	k, err = kubelet.NewMainKubelet(kubeCfg,
		kubeDeps,
		crOptions,
		containerRuntime,
		hostname,
		hostnameOverridden,
		nodeName,
		nodeIPs,
		providerID,
		cloudProvider,
		certDirectory,
		rootDirectory,
		imageCredentialProviderConfigFile,
		imageCredentialProviderBinDir,
		registerNode,
		registerWithTaints,
		allowedUnsafeSysctls,
		experimentalMounterPath,
		kernelMemcgNotification,
		experimentalCheckNodeCapabilitiesBeforeMount,
		experimentalNodeAllocatableIgnoreEvictionThreshold,
		minimumGCAge,
		maxPerPodContainerCount,
		maxContainerCount,
		masterServiceNamespace,
		registerSchedulable,
		keepTerminatedPodVolumes,
		nodeLabels,
		seccompProfileRoot,
		nodeStatusMaxImages)
	if err != nil {
		return nil, err
	}
	k.BirthCry()
	k.StartGarbageCollection()
	return k, nil
}
// parseResourceList parses the given configuration map into an API ResourceList
// or returns an error. Only CPU, memory, ephemeral (local) storage, and PID
// reservations are accepted; unknown keys and negative quantities fail. A nil
// or empty input yields a nil list.
func parseResourceList(m map[string]string) (v1.ResourceList, error) {
	if len(m) == 0 {
		return nil, nil
	}
	rl := make(v1.ResourceList)
	for name, value := range m {
		resourceName := v1.ResourceName(name)
		// CPU, memory, local storage, and PID resources are supported.
		switch resourceName {
		case v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage, pidlimit.PIDs:
			// supported reservation; fall through to parsing below
		default:
			return nil, fmt.Errorf("cannot reserve %q resource", name)
		}
		q, err := resource.ParseQuantity(value)
		if err != nil {
			return nil, err
		}
		if q.Sign() == -1 {
			return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", name, value)
		}
		rl[resourceName] = q
	}
	return rl, nil
}
// BootstrapKubeletConfigController constructs the dynamic kubelet config
// controller rooted at dynamicConfigDir and bootstraps it, returning the
// effective KubeletConfiguration (the latest valid checkpoint on disk, or the
// default when none exists) together with the controller. The
// DynamicKubeletConfig feature gate must be enabled and the directory must be
// non-empty.
func BootstrapKubeletConfigController(dynamicConfigDir string, transform dynamickubeletconfig.TransformFunc) (*kubeletconfiginternal.KubeletConfiguration, *dynamickubeletconfig.Controller, error) {
	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
		return nil, nil, fmt.Errorf("failed to bootstrap Kubelet config controller, you must enable the DynamicKubeletConfig feature gate")
	}
	if dynamicConfigDir == "" {
		return nil, nil, fmt.Errorf("cannot bootstrap Kubelet config controller, --dynamic-config-dir was not provided")
	}
	// The controller wants an absolute path to its checkpoint directory.
	absDir, err := filepath.Abs(dynamicConfigDir)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get absolute path for --dynamic-config-dir=%s", dynamicConfigDir)
	}
	controller := dynamickubeletconfig.NewController(absDir, transform)
	kubeletConfig, err := controller.Bootstrap()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to determine a valid configuration, error: %v", err)
	}
	return kubeletConfig, controller, nil
}
| updateDialer |
authentication.py | from rest_framework.authentication import SessionAuthentication as RESTSessionAuthentication
class | (RESTSessionAuthentication):
"""
This class is needed, because REST Framework's default SessionAuthentication does never return 401's,
because they cannot fill the WWW-Authenticate header with a valid value in the 401 response. As a
result, we cannot distinguish calls that are not unauthorized (401 unauthorized) and calls for which
the user does not have permission (403 forbidden). See https://github.com/encode/django-rest-framework/issues/5968
We do set authenticate_header function in SessionAuthentication, so that a value for the WWW-Authenticate
header can be retrieved and the response code is automatically set to 401 in case of unauthenticated requests.
"""
def authenticate_header(self, request):
return 'Session'
| SessionAuthentication |
freetype.gyp | {
'targets': [
{
'target_name': 'freetype',
'type': 'static_library',
'standalone_static_library': 1,
'sources': [
# base components (required)
'../third_party/externals/freetype/src/base/ftsystem.c',
'../third_party/externals/freetype/src/base/ftinit.c',
'../third_party/externals/freetype/src/base/ftdebug.c',
'../third_party/externals/freetype/src/base/ftbase.c',
'../third_party/externals/freetype/src/base/ftbbox.c', # recommended, see <freetype/ftbbox.h>
'../third_party/externals/freetype/src/base/ftglyph.c', # recommended, see <freetype/ftglyph.h>
'../third_party/externals/freetype/src/base/ftbitmap.c', # optional, see <freetype/ftbitmap.h>
'../third_party/externals/freetype/src/base/ftfstype.c', # optional
'../third_party/externals/freetype/src/base/ftgasp.c', # optional, see <freetype/ftgasp.h>
'../third_party/externals/freetype/src/base/ftlcdfil.c', # optional, see <freetype/ftlcdfil.h>
'../third_party/externals/freetype/src/base/ftmm.c', # optional, see <freetype/ftmm.h>
'../third_party/externals/freetype/src/base/ftpatent.c', # optional
'../third_party/externals/freetype/src/base/ftstroke.c', # optional, see <freetype/ftstroke.h>
'../third_party/externals/freetype/src/base/ftsynth.c', # optional, see <freetype/ftsynth.h>
'../third_party/externals/freetype/src/base/fttype1.c', # optional, see <freetype/t1tables.h>
'../third_party/externals/freetype/src/base/ftwinfnt.c', # optional, see <freetype/ftwinfnt.h>
'../third_party/externals/freetype/src/base/ftxf86.c', # optional, see <freetype/ftxf86.h>
# font drivers (optional; at least one is needed)
'../third_party/externals/freetype/src/cff/cff.c', # CFF/OpenType font driver
'../third_party/externals/freetype/src/sfnt/sfnt.c', # SFNT files support (TrueType & OpenType)
'../third_party/externals/freetype/src/truetype/truetype.c', # TrueType font driver
# rasterizers (optional; at least one is needed for vector formats)
'../third_party/externals/freetype/src/raster/raster.c', # monochrome rasterizer
'../third_party/externals/freetype/src/smooth/smooth.c', # anti-aliasing rasterizer
# auxiliary modules (optional)
'../third_party/externals/freetype/src/autofit/autofit.c', # auto hinting module
'../third_party/externals/freetype/src/psaux/psaux.c', # PostScript Type 1 parsing
'../third_party/externals/freetype/src/pshinter/pshinter.c', # PS hinting module
'../third_party/externals/freetype/src/psnames/psnames.c', # PostScript glyph names support
],
'include_dirs': [
'../third_party/externals/freetype/internal',
'../third_party/externals/freetype/builds',
'../third_party/externals/freetype/include',
'../third_party/externals/freetype',
],
'cflags': [
'-DFT2_BUILD_LIBRARY',
],
'direct_dependent_settings': {
'include_dirs': [
'../third_party/externals/freetype/include',
],
},
'conditions': [
[ 'skia_os == "mac"', {
'sources': [
'../third_party/externals/freetype/src/base/ftmac.c', # only on the Macintosh
],
}], | [ 'skia_os == "android"', {
# These flags are used by the Android OS. They are probably overkill
# for Skia, but we add them for consistency.
'cflags': [
'-W',
'-Wall',
'-fPIC',
'-DPIC',
'-DDARWIN_NO_CARBON',
'-DFT2_BUILD_LIBRARY',
'-O2',
],
}],
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2: | |
typing_exercise_solution.py | from typing import Callable, Tuple, Union, Optional, List
import torch
import torch.nn.functional as F
from torch import nn
class Network(nn.Module):
def __init__(self, input_size: int, output_size: int, hidden_layers: List[int], drop_p: float = 0.5) -> None:
|
def forward(self, x: torch.Tensor) -> torch.Tensor:
''' Forward pass through the network, returns the output logits '''
for each in self.hidden_layers:
x = F.relu(each(x))
x = self.dropout(x)
x = self.output(x)
return F.log_softmax(x, dim=1)
def validation(
model: nn.Module,
testloader: torch.utils.data.DataLoader,
criterion: Union[Callable, nn.Module]
) -> Tuple[float, float]:
accuracy = 0
test_loss = 0
for images, labels in testloader:
images = images.resize_(images.size()[0], 784)
output = model.forward(images)
test_loss += criterion(output, labels).item()
## Calculating the accuracy
# Model's output is log-softmax, take exponential to get the probabilities
ps = torch.exp(output)
# Class with highest probability is our predicted class, compare with true label
equality = (labels.data == ps.max(1)[1])
# Accuracy is number of correct predictions divided by all predictions, just take the mean
accuracy += equality.type_as(torch.FloatTensor()).mean().item()
return test_loss, accuracy
def train(
model: nn.Module,
trainloader: torch.utils.data.DataLoader,
testloader: torch.utils.data.DataLoader,
criterion: Union[Callable, nn.Module],
optimizer: Optional[torch.optim.Optimizer] = None,
epochs: int = 5,
print_every: int = 40,
) -> None:
if optimizer is None:
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
steps = 0
running_loss = 0
for e in range(epochs):
# Model in training mode, dropout is on
model.train()
for images, labels in trainloader:
steps += 1
# Flatten images into a 784 long vector
images.resize_(images.size()[0], 784)
optimizer.zero_grad()
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
# Model in inference mode, dropout is off
model.eval()
# Turn off gradients for validation, will speed up inference
with torch.no_grad():
test_loss, accuracy = validation(model, testloader, criterion)
print("Epoch: {}/{}.. ".format(e+1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss/print_every),
"Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
"Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
running_loss = 0
# Make sure dropout and grads are on for training
model.train()
| ''' Builds a feedforward network with arbitrary hidden layers.
Arguments
---------
input_size: integer, size of the input layer
output_size: integer, size of the output layer
hidden_layers: list of integers, the sizes of the hidden layers
'''
super().__init__()
# Input to a hidden layer
self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])
# Add a variable number of more hidden layers
layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])
self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])
self.output = nn.Linear(hidden_layers[-1], output_size)
self.dropout = nn.Dropout(p=drop_p) |
moeda.py | def metade(valor=0, formato=False):
res = valor/2
return res if formato is False else moeda(res)
def dobro(valor=0, formato=False):
    """Double ``valor``.

    Returns the raw number when ``formato`` is exactly ``False``; any other
    value routes the result through ``moeda`` for currency formatting.
    """
    resultado = valor * 2
    if formato is False:
        return resultado
    return moeda(resultado)
def aumentar(valor=0, porcentagem=0, formato=False):
    """Increase ``valor`` by ``porcentagem`` percent.

    Returns the raw number when ``formato`` is exactly ``False``; any other
    value routes the result through ``moeda`` for currency formatting.
    """
    acrescimo = valor * porcentagem / 100
    resultado = valor + acrescimo
    if formato is False:
        return resultado
    return moeda(resultado)
def | (valor=0, porcentagem=0, formato=False):
res = valor-(valor * porcentagem/100)
return res if not formato else moeda(res)
#! lembra que if formato: => formato = True ||| if not formato: => formato = False
# aqui moeda fica como segundo parametro pois o primeiro a ser importado é o valor
def moeda(valor=0, moeda='R$'):  # the symbol can be changed at the call site
    """Render ``valor`` as a currency string, e.g. ``R$1234,56``.

    Uses the Brazilian convention of a comma as the decimal separator; the
    ``moeda`` parameter supplies the currency-symbol prefix.
    """
    texto = f'{moeda}{valor:.2f}'
    return texto.replace('.', ',')
def resumo(p=0, por1=10, por2=5):
    """Print a formatted summary for price ``p``: its double, its half, a
    ``por1``% increase and a ``por2``% reduction, using this module's
    currency-formatting helpers.
    """
    print('-'*40)
    # .rjust() => align right | .ljust() => align left | .center() => center
    print('RESUMO DO VALOR'.center(40))
    print('-'*40)
    print(f'Preço Analisado:\t{moeda(p)}')
    print(f'Dobro do preço:\t\t{dobro(p,True)}')
    print(f'Metade do preço:\t{metade(p,True)}')
    print(f'{por1}% de aumento:\t\t{aumentar(p,por1,True)}')
    print(f'{por2}% de Redução:\t\t{diminuir(p,por2,True)}')
    print('-'*40)
'''
print(f'\nA metade de {moeda.moeda(p, "US$")} é {moeda.metade(p, True )}')
print(f'O dobro de {moeda.moeda(p)} é {moeda.dobro(p, True)}')
print(f'Aumentando 10% temos {moeda.aumentar(p, 10, True)}')
print(f'Reduzindo 13% temos {moeda.diminuir(p, 13, True)}')
'''
| diminuir |
HomePage.js | import HomeContainer from "../HomeContainer";
import '../assets/HomePage.css';
function | (props) {
return (
<div className="py-2 sm:p-2 bg-gray-200 bg-opacity-75 mb-2 sm:mx-48 rounded border-2">
<h1 className="">{global.searchable && <div></div>}</h1>
<HomeContainer />
</div>
);
}
export default HomePage;
| HomePage |
b0xfile_assets_aws_database_database-migration-service.png.go | // Code generaTed by fileb0x at "2020-09-15 07:47:28.661286999 -0400 EDT m=+1.026832864" from config file "b0x.yml" DO NOT EDIT.
// modified(2020-09-09 20:18:34.832289 -0400 EDT)
// original path: ../../assets/aws/database/database-migration-service.png
package assets
import (
"bytes"
"compress/gzip"
"io"
"os"
)
// FileAssetsAwsDatabaseDatabaseMigrationServicePng is "assets/aws/database/database-migration-service.png"
var FileAssetsAwsDatabaseDatabaseMigrationServicePng = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x7a\x79\x3c\x54\xff\xf7\xff\x2c\x96\xec\xa5\x10\x42\x29\x89\x31\x84\x2c\x59\x32\xf6\x18\x4b\xb6\x91\x6b\x2b\x59\x06\x59\x87\x6c\x61\x52\x92\x25\x42\x46\x64\x19\xb2\x64\x19\xc2\x95\x9d\x4a\x92\xa5\x5c\x7b\x29\x94\x25\x5b\x92\xbd\xb1\xfe\x1e\xa3\xf7\xe7\xfb\x59\x7e\x7f\xcc\x3f\x67\xce\x79\x9e\xf3\xba\xf7\xbe\x5e\xcf\xb3\xbc\xa2\xaf\x18\xe9\xb2\x31\xf3\x31\xc3\x60\x30\x36\xbd\xcb\x5a\xa6\x30\x18\x5c\x82\xf6\x3b\xc4\x00\x83\xc1\x82\x42\xe3\xfd\x61\xb0\xf3\xce\x7a\x5a\xea\xe6\x81\x19\x3f\x9b\x42\x9e\xd8\x58\xa4\x2d\xed\xe3\xf2\x09\x0b\x65\x67\xee\x41\x3c\x54\xb6\xa9\xbe\xcc\x44\x1b\xf2\xdd\xb3\x5a\x73\xf7\xe8\x74\x0f\xbd\x66\xb9\x7c\x68\x5e\xe5\x34\x7c\x26\xc0\x99\x7d\xc2\xe3\xc7\x89\x63\x33\xdd\x71\xf1\xab\x51\x09\xa9\x93\xdf\x08\x7e\x2a\x33\x69\x89\xb1\xb2\x82\x6d\xf7\x62\xd9\x39\x23\x98\xef\x9c\xb9\x4c\x67\xc8\x98\x74\x32\x81\xe9\xcc\x84\xec\xad\x42\x3d\x8d\xa4\xb2\x8f\xf9\x9a\xf3\x2a\x21\x16\x16\xc3\x63\xd5\xc3\xd3\xb6\x21\x14\x5b\xdb\xd1\xdd\xd0\xec\xc2\xac\xb2\xea\xcd\x80\xd0\x61\xeb\x06\xb7\xf0\x60\x8a\x6d\xc9\x90\x27\xf9\xfa\x03\x94\x57\x70\xdd\xe1\x2f\x4f\x98\xd4\x3e\x3c\x47\xb9\x3c\xa6\x27\x49\xe2\x5a\x7b\xa5\xce\x62\xa7\x0a\x2f\x40\x79\xe4\x65\x23\x72\x57\xe0\x46\xc9\x90\x98\x7b\x10\x5e\xe1\xb3\x8f\x09\x77\xf1\xa5\x58\x95\xcc\x5b\x51\x1a\x3c\xdb\x19\xca\x5b\x6d\x95\xeb\x3f\x2d\x43\x6d\x04\xa9\xef\x2e\x76\xdf\x5d\x94\x72\xeb\xfb\x96\xa2\xb0\x5b\xf0\xca\x8d\x13\x4a\xe2\xc5\x9b\xc2\xc0\xbc\x31\x22\x2e\xff\xb1\x0c\xf8\x26\x5f\xb7\x59\x1a\xed\x9f\x95\x37\xf2\x2a\xc5\x5d\x93\xac\xef\xcd\x89\x04\x32\x8e\xc1\xd0\x45\x0a\x64\x0b\xb1\xc1\xc3\xc0\x99\x60\xbb\xb9\x3b\x28\xae\xc9\x43\xcf\x1f\x53\xe0\xc0\x3b\x5b\x22\xa9\x9a\x1d\x4a\x5a\x7a\x2e\x07\x9e\x13\xf1\x0e\x92\x6b\xf4\x5d\xc4\xd2\x3f\x43\x93\xdb\x7e\x9b\xf1\xfd\x1f\xec\x05\x56\x48\xb8\xe7\xdb\x8a\x03\xb8\xf2\x4e\xf8\x79\x52\x66\x06\x91\xe4\xc3\x43\x76\xa2\x20\xc1\x29\xe5\xfb\x2c\x80\xfa\x72\x62\xc3\xd7\xd7\x29\
xfe\x72\xe7\x5a\xc5\x1f\x68\x38\x9e\x3c\x27\xc4\xf5\x2e\x60\xd9\x8f\xbe\x27\xec\xbb\x46\xe9\xfd\x99\xe1\xc4\x9f\xb5\xe7\xd8\x2e\x88\xe1\x1b\x8f\xce\x32\x67\xed\x16\xfb\x77\x02\x82\x23\x86\xb7\x6c\x1e\x47\x16\xf7\x08\x1b\x17\x8e\xe6\x40\x6d\x97\x66\xe2\xc3\x2a\x6e\xaa\x6e\x7f\x3e\x91\xba\x19\xb6\x75\x6e\xf5\xe1\x15\x5d\x87\x89\x48\x75\x2e\x59\x6d\x63\xac\xa4\xa6\x31\xb6\x58\xd3\x18\x5b\xc0\xd7\xa4\x4a\x39\x99\xb4\xf1\x92\x73\xfd\x05\x6e\xac\xb6\xf0\x5b\x8b\xee\x9d\x2c\x92\x0f\x4f\x30\x30\xa4\xb4\x46\x0b\xa4\xaa\x90\x4b\xab\x6f\x8c\x24\xf7\x02\x57\xf6\xf9\x99\x01\x3a\x89\x87\x2c\x92\x0a\x07\x4c\x05\xc8\x22\x66\x70\x60\xf0\x3a\x7d\xe9\xa9\xab\xf9\xc1\xbe\x88\x7f\x2f\xf9\xb8\x2b\xe4\x80\x44\x8d\x54\x21\x40\x9b\x2f\xf4\xe0\x94\x12\x06\x5b\x80\x00\x3b\x5f\x23\x70\xac\xb8\xbe\x0d\x38\xda\x5a\x80\x2c\x32\x0c\x07\x64\xb8\xc8\xda\x27\xae\x51\x1a\xaf\x27\x90\x98\x88\x24\xeb\x06\x22\xc9\x86\x11\xdf\x61\x4e\xe4\xde\xe7\xe3\x26\x4f\x7f\x90\x3a\x8b\x04\xf3\x68\xa6\x7c\xa3\x90\x4c\x24\xf7\x55\x94\xd7\x27\x5b\x08\x7c\xee\x7a\x18\xaa\x1c\x84\x03\xbe\x34\xef\x0f\xed\xf0\x69\xc1\x56\xfa\xd7\xb8\xae\x3b\xdd\x34\x49\x16\x25\x3b\x95\x20\xff\x7a\xe3\x7b\x0a\x25\x21\x50\xcc\xa8\x1b\xb1\xe9\x2a\xff\xf6\x37\xf8\xe9\x2e\x69\xfa\xad\x09\x06\x0b\x20\x21\xf1\x0e\x18\xda\xea\x08\x94\xc4\x93\x7c\x22\xf5\xf8\x0b\xdc\xed\x6b\x35\xdf\x09\x8c\x78\x3d\x77\x18\x20\xc2\x4b\x16\x41\x21\x80\x95\xdc\x5f\xb5\x8d\x9b\x66\xcb\x76\xa4\xca\x28\x7a\x7c\x9a\x3f\x91\xa4\xcf\x80\xef\x88\x21\x92\x1a\xdf\xdf\x0e\xd0\xd6\x22\x56\x4f\x20\x1b\x84\xc7\x99\x11\x80\xb3\x00\x39\x3f\x09\x0e\x54\x0a\x93\x45\xa4\x8a\xe3\xb1\xfe\x52\xd8\xde\x56\x04\x78\xd3\xef\x3f\x0c\x82\x02\xe4\x78\x46\xe1\x68\xfe\xff\xd0\x4c\x1f\x1b\x3d\x75\x8d\x2b\xea\xae\xdb\x0d\x4c\x57\x29\x12\x34\x97\xc1\x60\x71\x74\xe0\x54\xdb\xfc\x24\xa7\x20\xb7\x4c\x81\x1c\x0c\x90\x57\xc2\x60\x45\x90\xa0\xa3\x36\x06\x6b\x67\x2e\x90\x5a\xfa\xf9\x31\x0c\x8d\xfd\x0f\x84\xce\xe2\x43\xd7\x28\xb5\xcd\x17\x4e\xd0\x81\x19\x3b\x30\x74\x3c\x2b\x94\xc4\x05\x47\x07\x55\xec\xac\x35\
x65\x07\x6b\xaa\x7c\x10\xe7\x62\xf8\x1f\xec\xdf\x59\xa5\x26\x39\xd2\x0c\x60\xf7\x7f\x20\x4b\xa8\x7d\xe0\xfb\x1f\xc5\x81\xb6\xee\x98\x05\xfb\xc7\xe6\x16\x63\x8e\x06\x45\xfd\xe3\x2b\x0f\xe7\x52\xe7\x77\x77\xbd\xc7\x35\x27\x25\xbf\x6e\x42\x40\xd4\xb6\x60\x67\x40\x9e\xff\xb8\xe6\xa4\x41\xc0\x1a\xf8\x75\x07\xea\x9f\x78\x0f\xf5\x27\x71\xf5\xf6\xc7\xf0\x58\xc9\x8e\xc2\xd1\x0a\x6a\x98\xd8\xd3\x07\xf0\x28\xa0\xdb\xf7\x3a\x1d\xbe\x8e\x0a\x07\x4e\xfc\x7d\xaa\x29\xef\xe7\xdb\xe8\xf0\x41\x6b\x48\x50\xf1\xaf\xb3\x98\xeb\xaa\xfd\x02\x90\xef\xea\xff\x09\x1e\x54\x46\xd2\x9d\x47\x80\x5f\xff\xb5\xa4\x76\x25\xb0\xb2\x42\x0b\x8e\xd6\x0e\x26\x92\x2e\xd2\x63\xb0\xf3\x6c\x78\x0b\xfd\xf8\xb0\xcf\x9d\x61\x98\xae\x0c\x41\xb2\x93\x11\x1c\x4a\x0a\x46\x02\x29\x49\x77\xed\xe5\xeb\x85\x20\x99\x42\x46\x7c\x9a\xd9\xbf\x36\x2d\x24\x25\xcf\xc1\x02\x4d\xe4\xfd\xa7\xec\x95\xb1\x92\xcf\xb1\xff\x96\x39\xce\x15\x36\x7f\xe0\x21\xb7\x5d\xe6\x80\x64\x0a\x61\x78\xbd\x26\x38\x45\xbf\xe3\x8d\xb1\x72\xbf\xc0\x7f\xeb\xbd\x1d\xe0\x75\xa9\x47\x02\x19\x6b\x30\x34\x81\x99\x48\x1a\xe0\x87\x8a\x1f\x1f\xaf\xbf\xff\x5e\xbe\xcd\x94\x43\x3d\x57\xb3\xcb\x04\xef\x97\x98\x69\x60\xfb\x28\x2f\x58\x8c\xb9\xc0\xde\x31\x1b\x38\xee\x81\xdd\x8e\xb0\x0a\xeb\x5d\x63\x9e\xcf\x77\xe5\xfc\xa3\x1d\x6f\xe2\xcf\xa6\xdb\xf2\xa4\x74\xbe\x16\xec\xc9\xb8\x5c\xf7\x4e\xa2\x3a\x6b\x18\xe3\xf1\xa1\x2d\xe1\xf2\x4c\xa3\x9e\xe6\xd6\xb5\x90\xdf\xde\xd1\x96\x76\xca\x86\xbb\xd4\x09\xc6\x8a\xf0\x6f\x12\x8b\xdb\xa3\xae\xcb\x2a\x7f\x9e\x0a\x9d\xbb\xd2\x93\xa3\x50\xb4\xb9\x2e\x0b\x24\xc0\x80\xd2\x7c\xd7\x35\x21\x99\x7c\xa3\xa0\xb7\x9c\x5b\xda\xb7\xb4\xc7\xf8\xca\xb2\x7f\xec\x9a\x67\x89\xbb\x65\xbe\xb3\x29\x8a\x0d\xfd\x28\xb0\xeb\xf0\x59\x9c\x43\xdb\x24\xb5\x6e\x62\x57\x99\xe5\xeb\x7a\xb1\x0c\xd3\x50\xc7\x9b\x60\xc0\x33\xcc\x95\x52\xc8\x05\x2b\x0a\xfd\x41\x37\x9f\xa1\x90\xe6\x6b\x68\x35\x5f\xeb\x8d\xa8\x96\x6f\xb8\xfb\x3e\xa0\x71\x65\x84\x0c\x1b\xf9\x38\x9e\xc1\x09\x63\x11\x7c\xbd\xea\x7d\x4d\x15\x1d\xe4\x73\xa1\xac\xd0\x02\xc1\x5d\x91\x03\xc3\xfb\xf4\x95\x48\
xe1\xe9\xbd\xe7\x33\xd4\xac\x60\x6e\xbd\x8d\x13\xe3\x05\x3a\xef\x04\xc6\xe6\x15\x83\x39\x32\x73\x42\x5b\xf8\xf3\x78\xc2\x08\xfa\x6b\x9d\x76\x81\xd4\x0b\xe5\xc2\xe4\x7c\x89\x91\xf9\xfb\x48\xe0\xd4\xb5\x23\xe4\xe9\x59\x4b\x29\x48\x1c\x29\x7e\xfc\x1d\xd3\x15\x5b\x45\xf5\xd1\x77\xe7\x62\x78\xec\x2e\x89\xc0\x5e\x56\xad\x59\xf2\x12\x49\x95\xf8\x92\x12\x39\xb0\x98\xa8\x4d\x24\x3d\x1b\xf7\x56\x2f\x09\x1d\xd2\x02\x2e\x78\x71\xda\x36\x39\xcc\x3f\x81\x3f\x16\x0b\xab\x98\xa4\x53\x5d\x5d\x48\x5e\xeb\x8e\x3f\xea\x70\x95\x97\x09\x83\xb5\x02\x07\x07\x39\x01\xce\xbf\xc8\x28\x7c\x53\x7e\xdd\x9d\xb2\x66\x07\x71\x0a\x20\x9d\xff\xab\x4c\x75\xd0\xba\x71\x65\xe4\xa4\xea\x5a\x60\xf8\x31\xbb\xbf\xe0\x5c\xe0\xb1\x57\xc2\x98\xae\x5e\x29\x34\x9e\xbe\xb5\xa9\xac\xed\x22\x50\xb5\x75\xc2\x07\xcd\x7c\x5d\x48\x6d\x54\x1e\x93\xe0\xd5\xc5\x13\xf5\xf0\xaf\x26\x3d\x78\xe1\x40\xb3\x47\x14\x1c\x49\x6f\x8f\xcd\x32\x87\x15\x27\x86\xbd\x6e\xb4\x9b\xf8\x96\xa3\x30\x37\x2e\x78\x49\xa7\xc7\x2f\x3b\x8e\x83\xd7\x09\x8e\xe7\xf5\x2f\xd2\x5c\x88\xd9\x5c\x94\xf8\x43\x64\x25\xb7\xe9\xf5\x8c\x49\xc1\x06\xeb\xf9\x7b\xf2\x4b\xca\x9f\xe4\x96\x3d\x35\x9d\x62\xfc\xd8\xd7\x8c\x4c\xa1\x9a\x4c\x27\xd7\xab\x81\xaa\xdc\xdd\x1c\x76\x81\x76\x96\x5f\x82\x26\xef\xdf\x07\x53\x6d\x2e\x74\x60\x7f\xc9\x27\xaa\xf9\x90\x04\xb7\x5c\xb3\x04\xb7\x5c\x5b\x78\x6d\x88\x24\x93\xd4\xb3\x60\xcc\xab\xbe\x18\x26\xc8\x77\x0d\x8b\xa0\xd8\x4c\x2a\x02\x84\xc5\x8b\x7e\xab\xe9\x84\xc1\x60\x8e\x53\x3f\xe4\xd7\x33\x73\x7e\xa5\x1a\x1d\xc2\x2c\xaa\x7a\x03\x37\x17\xe6\x62\x8d\x47\x4a\xa7\x43\x39\xbe\x48\x65\x9c\xbc\xdf\x92\x6a\x73\x21\x71\x44\xff\xa8\xc5\xdd\x19\xd7\xfb\xb3\x3e\xbb\xb3\xc7\x55\x9b\x3d\x30\x52\x74\x60\x5c\x3d\x48\xb6\x80\x93\x70\x06\x44\xd2\xb3\x6c\x36\xb0\x9e\x7b\x66\xb6\xaa\xa6\x3a\x58\x68\x8b\x1e\x33\x42\xed\x63\x82\xe7\x67\x86\x18\xf5\x35\x23\x0b\x35\x78\xc8\x5c\x1d\xb8\x42\x4b\x4d\x7c\x87\x30\xcc\x72\x3b\x72\x92\x1e\x52\x92\x4a\x7f\xcc\xfc\xd6\x2e\x90\x7a\x6c\x77\x49\xe8\x04\xd7\x6a\xe3\x40\x07\x02\x38\x6b\xcb\x07\x75\xda\x2c\x37\x7b\x6e\
x18\x04\x60\xba\x3e\xf4\xd0\x51\x6c\xcc\x48\x97\x08\xd6\xfc\xfe\x68\x4f\xd3\x90\xab\x5a\xbc\x57\xbe\x64\xf5\xc4\xb6\xb4\xd9\x25\x6f\x2f\x1c\xf7\xc4\x5b\xe8\x9e\x6f\x64\xbd\x9f\x94\xc9\x63\x98\x87\x1d\x9b\xe7\xb8\xb6\xbf\x37\x39\x99\xdb\x9e\xcf\x8a\xe9\x7a\xbe\xca\x46\xe1\x7c\x71\xee\x88\x0a\x0c\x1d\xa2\xc6\x05\x2a\x39\x1b\xe6\x49\xed\x6d\xae\x66\x16\x8a\xd2\xb5\xd6\xc1\xae\x3c\xcd\x52\x92\x0c\xbe\x22\x49\x7d\x78\x06\x9f\xe2\x3f\x7d\xdd\x71\xf8\xcb\xd9\xfe\x69\xaf\xa7\x06\x86\xc9\xa3\xc1\x45\xea\x5e\x0b\x42\xcd\x41\x57\x43\x3d\x58\x20\x99\xf9\xdb\x1a\x28\x3a\x09\x04\x68\x5e\xf9\x38\xa7\xda\xad\xcd\xbc\x3e\xd4\x96\x30\x71\xea\x1a\x94\x3c\xb2\xb6\x26\x0d\x2f\x6a\xca\x89\xfd\x5e\xc1\xfa\xee\x10\xf8\xe6\x08\x06\x45\x1c\xb6\xb0\xed\x08\xa1\x94\x37\xca\x5f\xb6\x6f\xd8\x7e\x10\xfc\xe0\x10\x9e\xd7\xbf\x99\x9c\x62\xb5\xd7\xfc\x2b\x42\xc6\x04\x83\xed\x25\xa9\xe1\x6d\xab\x1c\x8a\x8f\x33\x9c\x8d\xbe\xf6\x84\x07\xe3\xd6\x6f\xd6\xfe\x78\x11\x01\xb4\x1d\x0a\xb6\xb3\x94\x72\xcf\xe5\xb0\xc9\x7c\x36\xc4\xbb\xd1\xf4\x69\x27\xba\xca\xaf\xdd\xd2\x8e\x11\xec\xfe\x51\xa4\x81\x85\x61\x9f\x9e\x23\xe7\xe3\xbb\x76\x1d\xed\x6f\x59\xc5\x37\xff\x19\xff\x38\x7c\xe6\x6c\x02\xcc\x7c\xd3\x43\xd1\x40\x79\x33\xe3\x5a\x35\x0b\x5e\xfb\x34\x6c\x58\xb7\x2c\xdb\x8d\xad\x0c\x06\x28\x84\x17\xf0\xb4\x6a\x56\x05\xb9\x41\x7c\x08\x92\x3c\xcd\x2d\x6e\x25\xb9\x89\x12\xa4\xb2\xdb\x65\xe9\x0c\xb0\x21\x63\xc9\x4a\x67\x9a\xbf\x57\x79\xf0\x40\x31\xba\xb0\xb2\xa7\xe5\xd9\x3a\x68\xd9\x77\xcb\x2a\xd8\xd6\xd6\x2f\xcd\xde\x8f\x46\x25\xc9\x22\x66\x22\x9b\xfc\x0a\xab\x94\xfd\x75\x16\x37\x26\xb2\x48\x4a\x90\xe2\xe7\xfd\xe5\xf8\xfd\x04\x49\xc5\xec\xcd\xfe\xf4\x9d\x57\x56\xf5\x0b\xf6\x1c\x7d\x6a\xe1\x35\xc6\xdd\x69\xfa\x23\x92\x64\xae\x87\xd7\x02\x37\xff\x0c\xa9\xb0\x65\x9d\xda\x58\x7b\x30\xd7\xff\x2e\x7a\xbd\xc6\x2e\x30\xad\x85\x8f\x8e\x06\xb5\x1a\x4b\x99\x2b\xb3\x71\x18\x6c\xf5\x7d\x77\xd5\xb3\xee\xf8\x70\x69\x86\x38\x29\xf4\xbb\xd6\xf9\x9f\xdb\xfd\xc6\x8d\x40\xa3\x51\xe4\x34\xe9\xa1\x52\x99\xfd\xc5\x88\xef\xa3\x01\x42\x97\xf4\
x85\xb6\x35\xca\x9f\x28\x76\x46\xab\x85\xec\x1f\x73\x53\x35\xee\x47\xb5\xfa\x76\xe9\xd8\xf6\x4d\x93\xf2\xdd\xeb\x91\x80\x73\x0e\x0b\x38\x34\xaa\xa8\x99\x8c\x11\x11\xfc\xfa\x87\x61\x77\x9d\x27\xdc\x78\x97\x5a\x35\x44\x85\x03\xb7\xea\x9b\x18\x8d\xfa\x3d\xb2\x97\x06\x37\x33\x34\x96\x13\x17\x0a\x80\x6a\x16\x7c\x87\xf9\x46\x98\x95\x1d\x3f\x91\x64\x4e\xfa\x36\x65\xf3\x32\x2f\x1a\x78\xda\xbe\x90\x74\x6c\x39\xc5\x6b\x13\xaa\xf9\xb5\x40\xe8\xe4\x80\x2a\xfb\x05\x7a\x5a\xf4\x02\x6e\xd7\x19\x97\x3e\x73\x8a\x29\x91\xf4\x3a\x36\xae\x32\x02\xd2\x4c\x6f\x85\x59\x8d\xea\xb1\xe2\xd3\x22\x5f\x8a\xba\x0c\x27\x65\x48\x5e\x3c\xf2\xfa\xf7\x52\x45\x61\xfc\x77\x4f\x95\xd2\x63\x3b\x1a\x25\x57\xce\x41\xc3\xcd\x04\x2d\x1b\x1b\x85\x36\x11\x2f\x27\x9b\xbb\x74\x60\x9c\x12\x3f\xde\x89\x2d\x0d\x0e\xb0\xe6\x7c\xe8\xf8\x2c\x8d\x6d\xe1\x53\xf7\x1a\x3f\x19\xba\xd0\x7c\x37\x47\x9c\x46\x2c\xfb\xaa\x94\xf2\xd0\x99\x17\x26\xed\x7b\xbc\x87\xc9\x4e\x03\x3e\x39\x29\x4e\x8e\x0c\x78\xe9\x7b\x5f\x93\xab\x42\x0b\xf6\x34\x9b\xbf\xf7\x2a\xc4\x0c\x97\x0a\xcf\xc1\x81\x1f\xc7\x76\x34\xdc\x97\xee\xf4\xbf\xcb\xb9\xbe\x1d\x39\xe9\x33\xae\x69\xd8\x8b\x6b\xbf\x83\x23\xa5\xf1\x92\xdb\x0e\x57\x1a\x1b\x19\xfe\xfc\x12\xae\x9f\x48\x63\xa6\xd7\xb2\xa0\x5f\x89\xdc\xbd\x90\xab\x07\x27\x8c\xac\x3a\xd6\x77\xf0\x30\x24\xd3\xd1\x77\x9f\x54\x6c\x8b\x89\x4d\x8e\xb0\xc7\x57\x20\x21\x53\x6e\x0f\x7f\x25\xbc\x58\xf3\x0f\x18\x68\x50\x09\x92\x07\x2f\x61\x65\x1c\xe9\x40\x47\x86\xf4\x63\x3b\x18\x2c\x2b\xf5\x26\xe2\x40\x43\x1c\xef\xda\x9c\x4a\x0f\x1a\x54\x5e\x20\xfb\x4f\x56\xcf\x3c\xf0\xaf\x58\x3b\x08\xe5\xab\xd1\x06\x0c\x97\x6a\xdf\xf9\xf4\x6b\xd7\xb2\xec\x1c\x0c\x9d\x26\x5e\x16\xfb\xc1\xcf\xc5\x99\x16\xba\x1e\x79\x70\x44\x25\xcb\xd3\x7e\xf8\xcd\x01\xaf\x3d\xfa\xe9\xd8\x23\xe7\x3c\x17\xbf\x9f\xad\x50\x97\x3a\xd9\x3e\x08\x07\x64\x2c\xfc\xa2\xfc\x0a\x94\x65\x65\x31\xb1\xa7\x0c\x10\x14\x0b\x54\x41\x5f\xba\x7b\xa1\x6c\x1f\x12\xec\x16\xfb\x94\xf9\x13\x5e\xda\x7c\xcb\x85\xe5\x43\xdd\x2b\x5f\x45\x7c\x62\xb6\x11\x2d\x37\x4b\x11\xe8\x4c\xfb\xd5\xdd\x0d\
x43\xdf\x3f\xdf\xb1\x3e\xfa\x65\x58\xae\xdf\x45\xad\xe0\xaa\x28\xcb\x41\xa6\xfc\xad\x09\x5e\xd7\xc1\xab\xf6\xa2\xf2\x26\x0f\x24\xb3\xd0\x99\x4d\x58\x34\x08\x70\x76\x26\x72\x6b\x3e\xa1\x03\xd4\x8d\x17\xb2\x2f\x85\xc3\xcb\x5c\xe7\x6e\x05\x29\x5d\x54\x96\x4a\xd7\x1a\xdf\x6b\x97\x0f\x7f\xe1\xf0\xb2\xa4\x8f\x96\x09\xde\xf8\x77\x18\x1f\xa9\xc9\x73\xdb\xe3\x19\xa7\x71\x81\xd0\x01\x97\x3f\x58\x88\x38\xf5\xcd\x23\x5c\x29\xb3\x39\xd8\x05\x9b\x9b\x72\xd9\x0a\x8e\x17\x8b\x7d\x59\x7f\xff\xb9\xf0\xd1\x10\x71\x05\xd2\x42\xae\x76\x40\xcb\xf3\xe2\x2b\x56\x9a\xed\x77\xed\x03\x5d\x7e\x87\x15\xa5\x72\x3a\x6c\x69\x6f\xb8\xf2\xbf\x8c\x2f\x78\x37\x31\xea\x87\xa3\x3c\xd1\xd9\xa4\xa8\x69\x8d\x86\x30\x1c\x9b\xe3\x1a\xa8\x39\x3c\xf9\xa3\x10\x6a\x73\x53\xdc\x6a\x73\x4c\xdc\x24\x2e\xd0\x0a\x84\x2b\xbd\xd1\x29\xba\x6a\xe7\x3f\x30\x99\x47\x6a\xa8\x97\x26\xb4\x1d\x5e\x19\x2a\x29\xee\x13\x4b\xd0\xd2\x68\x79\x2f\xd9\xe6\xd3\xbb\xed\x3a\x66\x3c\x5d\x94\x4b\x1f\xd9\x69\x53\xec\x7e\x05\x07\x03\x7f\xee\x9a\xde\xe3\x4e\x09\xb6\xb6\xdf\xdf\x11\x8a\x96\xab\x14\xfe\x86\x00\x1d\x19\x3a\xc5\x2f\x61\xb8\x3c\xb8\xa1\xa4\x45\x2c\x12\x48\x0a\x60\x85\x7c\x57\xb0\xbc\xc0\x31\xc4\x70\x33\xb2\x70\xf6\x8e\x6e\x02\x74\xe3\x69\x02\x1c\xf0\xbd\x9b\x83\x57\x2d\xbf\x83\x43\xf7\x30\x5d\xad\x1b\xd3\x37\x4c\x67\x27\x8b\x8c\xce\xf0\x03\x69\xd3\x4b\xd4\x6a\x07\xf0\x49\xe2\xa5\xea\x5e\x27\x95\x6d\x6d\x8b\xb0\xdd\x37\xca\xfd\x36\x9e\xd4\x8e\x75\x9e\xb9\xee\xa3\xda\x61\xdf\x24\x3c\x42\xcb\xec\x5f\xb9\x7b\xcd\xda\xdd\xfb\xfa\x65\x82\x9f\x1d\x04\x0f\x97\x7c\x9f\xe5\x49\x16\x59\x89\xe5\xf7\x6b\x6a\x32\xc1\xcc\xaf\xd4\xc8\x15\xab\x9e\x98\x98\xb7\xb6\x2f\x71\x23\x51\x39\xc4\x73\x8c\xac\xdc\xe3\x24\x6a\x4a\xa9\x96\x77\xb3\x48\xc9\xf6\x6c\xc1\xc0\x34\xde\xad\x3b\xc6\xeb\xd8\x5d\x5d\xaf\xf1\x93\x1e\xa1\xcd\xf6\xac\xfc\xc8\x07\x0f\xab\xb3\x70\x65\xd2\xd8\xd4\x96\x26\x93\xaf\x6c\x4a\xc4\x94\x85\x87\xe6\x1e\x48\x55\x8a\x5f\x08\xc0\x66\x67\x64\x84\xe9\x3a\x37\xa9\xe1\xfe\x85\xb0\x5b\xaa\xbd\x67\xde\x3b\xa2\x92\x35\x54\x9a\x25\x29\xa8\xde\x53\
x12\xd2\x43\x24\x5a\xfe\xfe\xd4\xb0\x6f\x5e\xb9\x33\x7f\x02\xc8\xe4\x31\xe4\x24\x8a\x8f\x57\x8e\x59\xee\x7c\x69\xd5\x9d\x3b\x3d\x6c\x5c\x55\xc6\x88\xcf\x67\xa4\x30\x10\xec\x88\xa4\x64\xfb\x9f\x1f\x06\xda\xc7\x43\xd7\x37\xc1\x9b\x1d\xc7\xb6\x7e\x2e\x7a\xe9\xdd\x16\x9d\xf5\xa5\x5b\x6f\xe0\x9e\x79\x7b\x57\x5f\xb3\x1d\x06\xc0\x1b\x11\x8f\xd6\x37\x32\x73\xb2\x9a\x5f\x1f\xa1\xa7\x6c\xa4\x1d\x2d\xda\x7d\xc3\x34\x20\x4d\x40\x54\xf7\x64\x7c\xe3\xba\x32\x1d\x38\x5d\xdd\xbf\x36\x18\x7e\xd1\x60\x3b\x2c\x40\xe9\xc8\x8b\x43\x90\x3e\x1b\xc0\xd6\xd8\x08\x43\x8b\xaa\x75\x1d\xf9\x3e\x34\x13\x6f\xb6\xb9\x88\x04\x7f\x8c\x10\x5c\x47\x81\xa8\x71\xd0\x42\x1b\xf6\xe2\x23\x35\xb9\x6d\xd2\x59\xc9\xf2\x26\x4a\xf3\xe6\xf8\x1d\xb3\x09\x76\x19\xf8\xfb\x15\xaa\xc6\x14\xf0\x64\xc7\x3f\x8f\x85\x42\xae\x39\xc6\xba\x84\xc0\x54\xcb\x29\x3c\xd0\xe5\xf3\x66\xd6\xdd\x7b\x6b\xb5\xb5\xb1\x19\xff\xc0\x3a\x9c\x7a\x08\xd3\xa5\x06\xf2\x35\x66\x67\x10\x49\x26\x05\xb1\xbe\xd6\xbb\x54\x85\xb9\xe6\xf5\x50\xd0\x75\xd5\x04\x08\xe8\x67\x95\x66\x44\x3d\x3c\xa4\x29\xf1\xe0\xe9\x70\x06\xff\xe7\x96\xd5\xfc\x89\xe2\xa7\x9a\x4d\x15\xa6\x7e\x2d\xe6\x76\x0f\xab\x56\x72\xd7\xa4\xca\x2b\x15\x88\xc6\xee\x84\xe9\x15\x57\xc8\xba\x53\x2e\xfe\x59\x41\x41\xe9\xac\xea\xa0\x12\xe2\x8a\x6a\xed\x9c\xa2\x2a\x38\x77\x75\xb9\xec\xe3\x57\x96\x8f\x71\x30\x70\x47\xa3\x24\x07\x80\xe3\x3b\xfc\xc7\xc8\x6d\x0f\xbc\xad\x55\x43\x0b\xe7\xba\xc6\xe7\x73\xb6\xc3\x02\x1f\xe0\x28\x19\x3a\x0a\xcd\x2e\xf5\xab\x57\x7a\xcf\x2a\xcd\xa4\x90\xae\xa6\xf1\x8c\x86\x11\x3e\xc8\x5c\x96\x0a\xb6\x57\x3b\xfd\x4b\xef\x4f\x77\x39\x03\xdd\xf3\x52\x63\x23\x67\x18\xb0\x47\x16\xc9\x9c\x63\xc7\x77\xf8\x87\xc5\xb3\x48\x15\x97\x8f\xd2\x35\x3d\xdd\x1e\x6a\x91\x14\x1c\x2e\xcd\xd0\x29\x79\xfb\xe1\x6a\x3f\xb5\x38\x0c\x6c\x9f\xb4\xde\x9e\x5b\x46\x07\x8d\x50\x9f\xce\xa2\xf7\x93\x33\x33\x54\x04\xc8\xd3\x8c\x40\x67\xda\x2f\x22\xc9\x46\x21\x03\x12\xe7\xbf\xf4\x92\xfa\x3c\x10\x94\x4b\x28\x2c\xeb\xf2\x45\x5c\x2b\x78\x6a\x1d\xb7\x1e\xf8\x63\x18\x09\x2e\x5a\x22\x35\x9b\xc4\x36\x3a\x3a\x98\
x30\xd8\xef\x33\x42\xc0\xc8\x81\x89\x25\x07\x24\x2c\x6d\x73\x22\x8f\x31\xcc\x57\x4b\xa5\xf3\x37\xf0\xf2\x4c\x54\x96\xe5\x5c\xd7\x3d\x32\x63\xf5\x22\x15\x0e\x47\xc1\x8b\xbc\xbd\x8d\x8d\x3c\x61\x00\xb3\x5a\x0b\x64\x6a\x70\x82\x1e\x34\xa8\x0f\xbe\x83\xbb\x78\xff\xdb\xfa\xec\x91\x8a\x13\x1f\x72\xa9\x0f\x11\x36\xb1\xdc\x17\xe2\xd3\xc6\x86\x59\x5a\x2e\xea\x29\xdb\x17\xab\x08\x9f\x0a\xe1\x80\xa1\x17\x6e\x45\xf1\x03\xa5\x7f\x9d\x78\x41\xe2\x4b\xf3\x3d\x3c\xe1\xc5\x47\x55\x10\xb6\xb0\x17\xf1\x69\xc9\xcf\x09\xaf\x1d\xe7\x9f\xc8\x25\x2a\x39\x1b\x39\x8e\x4a\xb6\x64\xdd\x18\x5f\x32\xea\x6f\x46\x16\xb6\xf9\xc5\x7d\x0b\xa2\xbf\x32\x3f\x99\x44\x07\x0c\xc1\xf1\x62\x3b\x3b\x98\x92\x9c\x97\xa2\xa2\x64\x27\xf6\xea\xf3\x9f\x7b\x2c\x98\x11\xd7\x46\x43\x18\x84\xbb\xfa\x1e\x9f\x1a\xba\xd4\xa0\xd8\x23\x16\x38\x36\x24\x73\x29\x45\x88\xa1\x41\x0b\xf6\xec\xf6\xa3\x7d\xac\xe8\xdb\x9a\x60\xd5\x50\x97\x89\x26\x51\x7d\xaf\x94\x28\x2d\x1a\xc4\xd7\x9c\xc1\x93\x25\x25\x6e\xf4\x50\xd2\x42\x69\x3c\x0b\x6a\x47\xb3\x04\xe0\xb9\x73\x15\x6f\xe9\x1d\x47\x4b\x13\x83\x4c\xeb\x0a\x7c\x72\x15\x74\xa6\xeb\xcc\xd0\x29\x76\x9f\x21\xbf\x28\xba\x44\x21\x45\x54\xb0\xce\xf4\xa7\x90\x6f\x6b\x90\x5c\x41\x66\x48\xcc\x79\x71\x63\x47\xb4\x88\xf6\x77\xd6\x7c\x29\x85\xb6\x7c\x57\x4e\x7a\x72\xfe\x6f\xc2\x5d\xdc\xdd\xaf\x97\x47\x99\xc9\x22\xc3\x15\x09\x2c\xca\x7d\x51\x24\x03\xcb\x73\x86\x39\xe7\x8c\x39\x47\xcf\xde\x4f\xc9\xb9\xf6\x4a\xb3\x30\x76\xf0\xdb\x1f\x69\x7b\x46\xb2\xc8\xf0\xc3\xb3\x78\xa7\x1b\x3a\x23\x38\xf1\x20\x1f\x3f\x9b\x16\x37\xdb\xd7\x55\xf6\xe6\xd3\xc3\x52\xbd\x3e\xd7\xbb\x06\xfc\x99\x0d\x04\x86\xa5\xb6\x7c\x77\xb7\xb4\xe3\x13\x5d\x5a\x9e\x48\x24\x66\xf7\xbd\x52\x7d\x12\x46\xff\x54\xec\x18\x93\x5f\x27\x20\x38\x82\x3e\xb3\x62\xc6\x7d\xa1\xd0\x4e\xdd\xf7\xb4\x05\xa6\xab\xc8\xf6\xdf\xa5\x1e\xeb\x52\xe2\xfe\x3b\x24\xe4\xeb\x36\xdc\x35\x23\x00\xf9\xba\xd5\x38\x43\x2b\xa5\x48\x70\xaa\x6d\xa5\xbf\xb4\xef\x38\xe4\xeb\x46\x3b\xc7\x61\xe8\x14\x5a\x19\xc6\x92\xa2\xac\x86\x04\x9a\x77\x60\x68\x65\xdb\x51\xcd\x58\x7e\x22\xe9\
xeb\x17\x59\xb0\x52\xe7\xc6\x3f\x90\xfc\x1d\x90\xfe\x2d\x5f\x04\x10\xa7\xdf\x17\x6a\x85\x43\x10\x49\x66\x0d\x44\x92\x15\x23\xbe\xc8\x91\x88\xab\x3d\x47\x25\x1d\x86\x56\xa6\x53\x0f\x01\x4a\x02\xe4\xe9\x01\x49\xda\x9a\xce\xe0\xc5\x9a\x45\xe8\xc0\xdb\xb4\xfc\xe0\x6f\x3d\x7a\x99\x49\x22\x53\x9a\x3c\x3d\x24\xa9\x00\x43\x7b\x85\x49\xf3\x92\x45\x06\x1f\xd2\xe1\x9d\xd6\x67\x78\xc9\xd3\x5d\x09\xfb\x73\x2e\x5b\x70\xc0\x68\xb1\x32\x01\x1b\x01\x03\x8f\x35\x70\x52\xb5\x59\x3e\x7f\x92\xf7\x44\x10\x49\xd6\x0d\x23\x30\x30\x4f\x0e\x46\x49\xba\xf9\x53\x03\x85\x92\x9b\x38\x0b\xc3\x8b\xe0\x53\xf9\x89\x24\x50\xa7\x49\xbd\x6b\xbb\x83\x09\x3f\x27\xae\xed\x5c\x8f\x04\xc4\xb9\xc8\x4e\x46\xff\x2a\xf2\xd0\x64\xae\xa7\x73\xec\xf8\x22\x73\x22\xe9\xa2\x2a\xa6\x2b\x03\x41\x16\x59\x77\xde\xd2\x73\xf9\x5f\x3d\xde\xb0\x83\x0d\xa2\x4f\x8f\x4f\xeb\xa7\x51\x23\x0c\x30\xf5\xba\x9b\x7d\xab\x40\x99\x9c\x6f\x9a\xcf\x48\xf1\x57\x90\x6b\xd8\xa6\x45\x46\x4a\xc0\xce\x34\xea\xb9\x49\xd9\xc2\x80\x21\xa5\x5f\x3e\xd2\x1f\xfd\x18\x41\xf3\x62\x3f\xb2\x5c\xc0\xc6\x19\xb5\x76\x7a\xc8\xd7\x4d\x41\x51\x72\x51\x56\xf4\x79\xa8\x01\x37\x91\x54\x1d\x4f\xb0\xb9\x4a\x2b\x97\x53\xc4\xef\x64\x9f\xaa\x97\x7c\xaa\x21\x40\xe4\xc6\xa6\x9e\x05\x7d\x1f\x5a\xd5\x5f\x1f\xbc\x50\xcd\x58\x28\x86\xe9\x02\x5c\x94\x24\xdd\xae\x1d\x21\xb7\xf5\xe6\xc3\x29\x3d\xa1\x13\xb7\x7d\xda\xf9\x30\x5d\xa5\x58\x16\x40\x68\xfb\x3d\x6b\x83\x75\x16\x1c\x30\x0a\xd3\x5f\x12\xcf\x14\xbc\x4a\x87\xf7\xf1\x6b\xce\x19\xbc\xe4\x9e\x44\xe4\xa6\x2a\x49\x12\x49\x29\xe3\xc5\x28\x5f\xbf\xf5\x95\x8c\x7a\xc1\x2e\xa9\x46\x01\xa8\x72\xb0\x28\x3e\x96\xf7\x40\xef\x36\x7e\x59\xf8\x38\x5f\xd8\xcc\x02\x01\xea\x77\x98\xbf\xad\x8e\x9a\xb5\x1a\x5d\xe6\xa9\x97\xba\xc4\x0a\xc7\x5f\xa4\x8a\x35\x61\x95\xd6\xd3\x7f\xf1\xdd\xc0\x60\xf3\x57\x58\x28\xe4\x42\x2e\x0c\xcb\x30\x18\xa8\xc4\x83\x9f\xe6\xab\x5a\x5b\xdf\xf7\xb9\x34\xfa\xc2\xad\x18\x68\x67\xc3\x74\x75\x18\xcf\xd9\x9e\x3b\x3e\xe4\x37\x7c\x1c\x09\x88\xa7\xd8\xe3\xab\xe5\x13\x0a\xc3\x77\xb3\x09\xb3\x24\x0e\x79\x33\xb3\x49\xc5\x99\x1d\
x18\x25\xb0\xb4\xde\x81\x39\x74\xa9\x8c\x65\x2e\x29\xaf\xff\x78\x1c\x35\xb2\x06\x09\xbc\xac\xff\x9d\x75\xff\x32\x7d\x57\xc3\x2b\x6e\x50\x41\x1b\xf7\x67\x0e\xf5\xb0\x42\xd9\x60\xf5\x34\x6c\x68\x3e\x20\x53\x73\xf3\xeb\xb8\xee\xd3\xac\xdb\xad\x62\x2d\xb3\x97\xaa\x5f\x3c\x5a\x11\x28\x90\x15\xdc\x15\xce\xce\x9c\xe7\x63\xb6\x9d\x69\xec\x00\x3d\x3f\xf8\xe9\x5e\xf1\x59\x65\xf1\xda\x0c\x15\x9d\xae\x20\x44\xcd\x31\x58\x1e\xc4\xca\x41\x21\xd7\xf8\xcf\xb4\xf7\x14\x5a\xab\x7d\x32\xd8\x1c\x1b\xd7\x4d\xd0\x7b\x4d\x8f\xd7\x73\xbf\x27\xef\xf7\xf1\xa9\xd6\xc2\x50\xc6\x42\x85\x2e\x3d\x9e\x8d\xcb\xec\x3e\x6e\x27\xc4\x1a\xd3\xd5\x21\x55\xbc\xc2\x81\x16\xbf\xb3\x50\x25\xa4\x66\xbf\x56\x8f\xc0\x7b\x04\x4e\x9d\x5f\x9f\x19\x07\x3f\x7a\x95\xaa\xbb\x0f\xae\x88\xf9\xb5\x56\xcf\x5c\xd9\x8e\x8c\x63\xca\x19\x82\xd3\xec\xee\x91\x6e\xe7\x32\x91\xa7\xa7\x78\x2c\x2c\xd4\xbb\x6e\xd0\x7d\x03\x02\xcc\xaa\x02\x6f\x6e\x4f\xfb\xc0\xcb\x29\x2d\x2a\x9e\x9b\x4a\x3a\x17\xc1\x11\x17\xa3\x3c\xb9\x92\x42\x93\xe1\x89\xb3\x6e\x6e\x53\xa1\x62\x9e\x47\x3b\xe2\x63\xfb\x30\xb6\xf4\x10\x67\x4e\x16\x2f\x23\xfe\x22\x55\xdb\x22\xfd\x68\x9f\x99\x9f\xad\xbc\xb6\xca\x0a\xd2\xc5\x75\x41\x2a\xa1\x77\x68\xb1\xbd\x28\x64\x13\xb6\x3e\xd3\xc3\xb7\xca\xef\x01\xcd\x67\xa8\xa5\x91\xd0\x9e\xb9\x65\x6d\x17\x1b\x3a\xb2\x99\x90\x50\xd2\x42\x1f\x91\xb4\x37\x4a\x07\x66\xec\xe4\x8a\x0b\x82\xbf\x9a\xf6\x4f\x8e\xae\xbc\xe6\x7e\x66\xc4\xa5\x2e\xe5\x30\xd8\x49\x78\x7b\xfa\x41\x66\x03\x23\x5d\x6d\x7d\xc0\x74\xdb\x78\xef\x21\x4a\x7c\xda\x51\xfa\xbc\x88\xf6\x5c\x6a\x72\xd3\x7a\x5e\x16\xd5\x22\x4b\x32\x3c\xe4\x28\x96\x05\xdf\xe1\xdf\x9c\xe3\xb4\xb7\x89\x3c\x88\xe2\x87\xf4\xf7\x26\x35\xc5\xcb\xd6\x35\x42\xcf\x7a\x42\x57\x12\xde\xaa\x48\x0c\xcf\xda\x40\xe7\x04\x17\x3c\x95\x8c\x1f\x77\xc1\xbe\x34\xec\xe7\x05\xae\x9d\x38\xf1\xb8\x39\x2e\x38\x12\x57\x26\x86\x5d\xac\x3a\xdb\x97\x4b\x7d\xe8\x50\x3b\x73\xf4\xca\xf4\xa1\xe0\x22\x7b\xaa\x5a\x28\xb8\x48\x55\xd9\xfd\x19\x42\xf8\xb4\x0e\x43\xd7\x7d\x14\x05\x03\x77\x9d\x30\x5d\x1d\x3d\xf2\x1b\xcd\xc1\x49\x7b\x3b\
xe5\x1b\x7d\x72\x4b\xe9\x46\xd9\x7f\x56\xe5\x5b\x77\x50\x0d\xe7\x90\x12\x27\x94\x37\x59\x0f\x79\xd3\x27\xc0\x1c\x76\x91\x5e\xdc\x21\x37\x5a\x0f\x51\xea\xaa\xce\x32\x1d\xf6\xef\x4e\xbf\xec\xf1\x61\x92\x3e\x23\x62\xc6\xd5\x26\x8a\xb9\x3d\x40\x48\x45\x83\xc3\x53\x45\x7f\x17\xe9\x72\x74\x90\x11\x9c\x52\xb2\x87\xdc\x2e\xcb\x9a\x60\xb0\xf9\x93\x6d\x37\x8c\x4f\x1c\x19\xce\xf5\xbc\x3d\xce\x0a\x6c\x37\x13\x9a\x22\x50\x27\x32\x57\xf9\x8b\x08\xcd\xb7\xc7\x2f\xc1\x5f\x54\x4d\x00\x57\x61\x55\xbe\x3f\x47\xc3\xf5\x6f\xcb\xf2\x86\x72\xd4\x4a\x29\x6a\x66\xb2\x9c\xe5\x48\x41\x9c\xa4\x4f\x38\xe9\x06\x17\xb1\x52\x69\x9e\x56\x0c\xdd\xbc\xd3\x3a\xd0\x1f\xf0\x8e\xfa\x10\x09\x4e\x29\x09\x41\x6e\x97\x95\x0f\xb0\xb5\xc3\xf7\xe2\x8e\x9f\xf5\xfc\x93\xa0\x09\xde\x9c\x3b\x56\xbf\xae\x89\x9e\xf8\xf2\x1a\xd0\x0e\xbc\x3a\x52\x7d\xe6\x35\x47\xc4\x82\x62\x44\xce\x38\x92\x3f\xa1\x6f\xd2\x58\x08\x15\x11\x83\xe4\x75\x86\x01\xbe\x99\x28\x90\xcf\x2d\xf1\x1c\x39\xbf\xcf\xe7\xd9\xc9\x60\xd9\x04\xd5\xe5\xfb\x81\x12\x21\xed\xc7\x80\xd0\x69\xbf\x65\xca\xc9\x37\x97\x36\x04\x14\x0d\x39\x6e\xde\xd6\xb4\x79\xd6\x19\x15\x15\xf6\xf5\xbb\xfb\x99\xdb\x1c\x4f\x53\x18\xe6\xc5\x6f\x1c\x02\xd8\xaa\xf5\x8f\x41\xa6\x73\xb5\x26\xd2\x9f\xb7\x7e\xfa\x48\x9b\x77\xcb\x3d\x3c\xda\x54\x56\xbd\x94\xf9\x7d\x1a\xb1\x8b\x8f\x1f\x1b\xee\x81\x0a\x0a\xe3\x7b\x0b\x0a\x13\xcc\x8c\xc7\xaf\x4d\xb1\x45\x37\xca\x49\xe3\x7d\x10\x50\xe5\xa0\x48\x24\xee\x67\xae\x32\x59\xae\x1b\xaa\xfc\xcd\xb2\x59\xdf\xf9\x1b\x08\xfb\x6c\x0e\x80\x2f\xa4\xab\x17\x6c\x4f\x8b\xdd\x6a\xf6\x8b\x78\xa9\x9a\xca\xa3\x5a\x24\x9c\x37\xa5\xfa\xfc\xd3\x29\xe3\x35\x0e\x06\x9a\xd5\x1d\xd2\x9a\xb5\x06\x06\xa5\x8d\xf7\x11\x56\x0f\xbb\x1e\x48\x5f\x81\xbf\x7e\x14\x0d\x40\x5a\xa9\x92\x37\x63\x42\x51\x8a\xc0\x8d\xf1\x3b\x66\x20\xa1\x87\x69\x7e\x8c\xa0\xb1\x93\x12\xd6\x5f\x78\x7c\x43\xe4\x3e\x37\xe1\x38\x13\xbe\xc3\xff\x25\x64\x6a\xe4\x0e\x03\x4d\x23\xb8\x15\x4e\x84\x78\x3a\xe4\xce\x96\xa9\x8c\x86\x84\xfb\xb8\xc0\x5e\xd4\x55\x89\x32\x3f\xa8\xae\x8e\x13\x5a\xd6\x7c\x7f\xed\xc1\x30\xc1\
xfb\x05\xdd\x73\x38\x1c\x25\x5d\xa2\xe6\x2b\xa1\x78\xe5\x3c\x26\xf6\x28\x8d\x11\xe4\xa1\xbe\x8f\xad\x08\xd0\x8d\x03\xbd\xc7\x51\xb0\x38\x82\x46\xde\x6c\x3d\x1d\x15\xb5\xf1\xf8\x3b\x97\x4a\xc4\x4c\x9b\x5e\xda\x3a\x7d\xe5\x58\xb3\xfa\x32\x5b\xd6\x93\xa9\xad\x2d\xde\x91\x9a\xfd\x63\x8b\xaa\xde\x49\x2b\x41\xf4\x86\xb5\x3c\x1f\xeb\xa0\x09\x06\x1a\x79\xf0\x00\xef\xa2\x9f\x11\x49\x5e\xf8\xb4\xd3\x68\xbc\xc9\xa0\xcf\xdc\x58\xa6\xe8\x88\xed\xd0\x5a\xca\x9e\xf3\xdb\xf5\x17\xf1\x56\x47\xb5\x53\x60\x8e\xea\x8f\x5f\xe3\xe2\x3f\x52\x3d\x5b\xbb\x1d\x12\x9e\xd7\xbe\xb9\xf7\x64\xd2\xff\x9b\xb9\xcf\xf6\x5e\xf2\x40\xb6\xd4\x03\x77\x23\xcd\xe1\xcc\x37\x89\x4c\x6a\xd5\x77\x8f\x58\xd2\x28\xaa\x95\x03\x3d\x64\x10\x80\xe9\x12\x05\x03\x4d\x50\xf8\x0e\x3f\xf8\xe5\x18\xd9\x51\xd7\xd9\xf2\x0b\x8a\xad\x52\xc5\x9e\x58\xc2\x35\x89\x28\x82\x9c\xad\x31\x8d\xf5\xcc\x1a\xce\xe2\x2b\x86\xfd\x8c\x09\xd1\x16\x1e\x6c\x29\xd9\xcd\x9c\xb2\x99\x11\xd5\xf6\xd5\x65\x8f\xde\x27\xf0\xec\x79\x0d\xcd\x53\x6b\x8f\x3d\x8e\x7c\x1e\x57\x34\xb9\xa5\xdd\xbf\xba\xc0\x73\x51\x67\x9e\x6f\xeb\xf2\xfa\x41\x9b\xfe\x8f\x6a\x40\x14\xa1\xce\xa9\x44\x6e\x14\x8e\xae\x63\x87\x64\x58\x74\xc5\x41\x4e\x46\xf2\xf4\x40\x59\x31\x6e\x68\xe8\x38\x94\xb4\x00\x43\x57\x79\xfb\x69\x74\x8d\x72\x40\x49\x0b\x8e\xd5\x95\x7a\x71\xff\x28\xc2\xd1\x91\xac\x50\xd2\x7c\xb1\x7b\x5b\x38\xb7\x3d\x63\x74\xbc\x2b\xde\x55\xad\x84\xc6\x5a\xe3\x07\xcf\xa4\xcb\x98\x11\x4d\x55\x12\xf4\x7f\x74\x8e\x9c\x3f\x42\x63\xa9\x92\x53\xa0\x4a\x0b\x3b\x06\x5b\x86\x4a\x1d\x60\xe1\x18\x54\xcf\x7d\xc6\x09\x7c\x85\x81\xa5\xbb\x30\x74\xd1\x9d\xb3\x91\x89\x8c\xe8\x5d\x7d\x57\xff\x47\xb3\x9f\xee\x72\x97\xd3\x2a\xd1\x2c\x46\x35\x4f\x18\x20\x62\x55\x0f\xc9\x48\x2d\x5e\x65\x8c\x46\xfc\x9b\x00\xcf\xfb\x92\xdb\xac\x63\x5f\xb2\x73\x0a\xa8\x1e\xf8\x9a\xb0\x44\xe1\xdb\xdc\x68\xca\x4a\x12\xd7\x7a\xfc\x84\x7e\x69\x31\x41\x7d\xcf\x61\x68\x42\x30\x91\x74\x31\x72\xcb\xe5\xce\x20\xed\x41\x7e\xe4\x80\x92\xba\x60\xe8\x10\x01\xb2\x05\xeb\x02\x8d\x3c\x33\xb3\x13\xb0\xa7\x89\xa4\xa7\xf6\xf5\x50\xdf\
x6d\x49\x38\x25\xa9\x3b\xdb\x3e\xf7\xe0\x0f\xa1\x83\x61\xc2\x46\x7d\x05\x3b\xe5\xe6\x0e\x6c\xd8\x25\x57\x66\x14\x7f\x6f\x88\x22\xe4\xe6\xbf\x81\x04\x15\xe7\xd2\xfc\x33\x57\x45\x4b\xe4\xf8\x08\x18\xac\x08\x61\xf3\xa8\xaa\x04\x06\xdb\x4c\x50\x24\x0f\x4a\xbb\x54\xf2\xa8\xd1\x42\xb7\x1b\x96\x26\xe4\x06\xb7\xe4\xd8\xd2\x14\x56\xf9\xf9\x38\x11\xc0\x4a\xe6\x58\x8e\xd3\x03\x27\x6c\x21\x02\x18\xac\xbc\x4e\xa4\x1d\x5d\xee\x65\x99\x65\x1b\xd7\x40\x16\x7c\x91\x63\xc7\xae\xf0\xf0\x0b\x18\x9a\xfa\x11\xd0\x5c\x20\x64\xd7\x63\x24\xcc\x58\xf1\x89\xb9\x94\xb5\x1c\xff\xed\x0f\x1f\x06\x82\x05\xdd\xd5\xed\x3c\x1a\x88\x24\x7d\x52\xb7\x94\xbc\xcf\x31\x28\x69\xa9\x54\x04\x54\x72\x36\xce\xed\x09\xdd\x42\x28\xab\x61\xba\x46\x8c\xe9\x0f\x4a\xe5\x41\x54\x41\xf1\x82\xd5\x4b\x1a\xe4\x46\x58\x1f\x6d\x2d\xb7\x0b\x85\xfe\xe1\x55\xfe\xf0\xb8\x2c\x5c\xb5\x84\x13\x7e\xb4\xb9\x2c\xed\x12\xe1\xe9\xaf\x58\x0e\x4d\x18\x80\x53\xb2\xe6\x3e\x47\x4b\x2f\x56\x32\xab\xef\xe1\xa0\xfe\xfa\x6d\x7e\x82\x8f\x61\x09\x40\x42\x80\xbe\x86\x56\x5b\x85\x2d\x2d\x44\xcf\xab\xa1\xf1\x67\xd5\x9d\x69\xba\xfc\xdc\xf8\xff\xd3\x75\xef\xa2\xdb\xfa\x68\x4b\x98\x1b\xe2\xa8\x31\x35\x0a\xd9\x0e\xc2\xa0\x8a\xc6\xe5\x50\x85\x7d\x9b\xfb\xc3\x27\xba\x5c\x59\xef\xc3\xc0\x52\x3f\x1b\xee\x0b\x5a\x71\x22\x35\xd3\x62\x6f\x7f\xdd\x57\xf6\xf5\xe4\x3f\xd6\x95\x39\x59\x58\x3e\x45\xe5\x76\xd8\xa9\x9d\x6e\xa5\x80\x5b\xc6\x05\xfd\xaa\xe3\x4b\xf9\x0f\x8d\x4f\xf8\xb8\x05\x28\x7a\x9d\x7a\xb7\x82\xca\x0b\x2a\x29\xd5\xbf\xde\xbb\xa3\x03\xd4\xdc\xba\x7f\xfa\xf9\xcf\x2f\x0f\xb3\xcd\x3c\x6b\xce\xc4\x76\xe8\x2c\xa3\xdf\xf2\x29\xbe\x23\x92\x6e\xbb\x50\x72\xb8\xd6\x5e\xfe\xda\x19\xa0\xcf\xad\xf1\xef\xe1\x08\x76\x9d\xec\x71\xfa\x86\x00\x0d\xc2\x62\x7f\x18\x17\xf8\xbb\xf7\xac\x7e\x7a\x86\x00\xe3\xbe\x47\xd3\x2f\x2f\x79\xc0\xd1\x0e\xe6\x7c\xf2\x78\x13\x5e\x9b\xec\x46\x4c\x77\x0c\x47\xb0\xf5\x42\xf8\xce\x16\x64\xfb\xd8\x63\x44\xc0\x1d\xaa\x77\x6e\xbb\x00\x54\xfb\x16\xaf\xda\x96\xf0\x76\xb8\xdd\x82\x03\xce\x69\xe9\xf4\x2b\xd1\x67\x89\xdc\xd2\xe2\x22\xca\x17\x55\x75\
x60\x65\xd6\x97\x06\xc4\x05\x69\x7c\xd4\x9e\xa5\x68\x60\x9d\xa5\x7a\xd6\x2b\x20\x4e\xa3\xe4\xc6\x90\x16\xb0\x9a\x35\x47\xab\x1c\xdd\xdc\xa6\xd0\xfb\xce\x5c\xf1\x8f\xe0\xa0\xb9\xcc\x45\x96\x01\xd9\x49\x18\xda\xc1\xbc\x52\xed\xa2\x59\x8c\x13\xb6\x30\x46\x37\xec\x9b\x21\x9c\x42\xf9\x92\xd0\x17\xcf\xb3\x3a\x90\xad\xe6\x6e\x70\x1d\x54\x77\xef\x8d\xd0\x99\x07\x64\x22\x49\xe8\x0e\x13\xf7\xd5\x40\x8b\xc9\x3d\x04\x24\x33\x5f\x1b\x66\x62\x95\x41\xbf\x55\xd4\x48\x73\x1f\x14\x39\xa4\x3a\xbe\x94\x63\xad\xf6\xd5\x79\xa0\x28\xb7\xc8\xe7\x84\xed\x6a\xd9\xdc\xc4\x11\xad\xcd\xb2\xc6\xc0\x9f\x81\x15\xf6\x3b\x02\x0f\xa1\xf9\x27\x72\x7a\x98\xe2\x9d\xdf\x83\xc9\x23\x1b\xb7\xd5\xfc\xd1\x1f\x97\xdf\x12\x49\x26\xed\xeb\xb1\x9d\xb9\x12\xe7\xf7\x37\x60\x90\xb0\x8e\x48\x6c\xb2\x99\xd0\xe6\xce\xb8\xe0\xc8\xd6\x4f\x93\xad\xaf\x85\x52\x79\xf1\x9f\x7f\xf1\x44\xf2\xa0\x6f\x72\x26\x58\x5f\x1a\x10\xee\x74\x22\x2e\x86\xae\xd3\xed\x6e\x0c\x26\x0f\x10\x7e\xfb\x9d\x6b\xd8\x8a\xc2\x49\x2a\x18\xb1\xae\x72\xa3\xf1\x7c\x88\x94\xa5\x1c\xbb\xb0\x6f\x12\xa1\xd4\xec\x10\xfc\x02\x61\x77\xed\xed\x6e\x78\x77\xf9\x21\x3c\xaf\xbf\x4d\xcb\xf3\xd2\x37\x88\xb1\x84\x7c\xda\x5a\x1d\xb2\x32\x7e\xfc\x59\x44\x89\xce\x3e\xcb\xbe\x9d\x36\x64\x37\xac\x21\xb4\xeb\x8d\x92\x93\x9b\xfb\x61\xff\x59\x9b\xf7\xda\xf7\xd9\xb1\xcc\xc2\xac\xb1\xfe\x01\x82\xf7\xa3\x5b\x3f\xe7\x79\x20\xce\xa6\x8a\xeb\xf7\x70\xed\x3f\xf5\x1c\x64\x5c\x7e\xd7\x50\x82\x1a\xff\xf0\xc3\xc4\x53\x16\xae\xe4\xd4\x7a\xb5\x9e\x0a\xfe\xd6\x7b\x3a\x25\x07\xf4\x80\x01\x3a\x02\xc3\x52\xe2\xa8\x4a\xf2\xeb\xc1\xe3\x50\xd2\x92\x5f\x24\xee\xf0\x42\xb8\xdf\x23\xf9\xbd\x9f\xe7\x29\x4d\x65\x69\x9e\xc1\xfb\x41\x76\xaf\xb9\xe2\xc9\x35\x1a\x2c\x31\x87\x1b\x22\x01\xe1\xed\x5b\xbf\x3e\xc6\xb6\xec\x52\x15\xb4\x13\x4a\x4f\x20\xf0\xc2\x1a\x52\xa6\xdb\xc1\xb2\x89\x2e\x02\x55\xa2\xdc\xe1\xb7\xb4\xb4\x3c\xd0\x93\x45\x94\xa0\x88\x40\x41\xa2\x98\xc7\xb9\x0e\x87\x7e\x33\xdd\xb9\xe6\xf5\xa8\xe7\x85\x5e\xd0\x6e\xf0\x69\xb2\xd3\x80\xcf\x6e\x6e\xa5\x46\x51\x14\x3d\xfe\x33\x75\xf0\x30\xb0\x4b\x95\
x75\x78\x52\xbb\xa7\x1b\x0a\x6e\x8d\xce\xf4\x8d\x6c\x66\xeb\x7a\xa0\x8f\x7e\x35\x40\xe7\x12\xd4\xf3\x15\x86\xfc\xad\x66\x19\x5f\x0b\x95\x5a\x31\x17\x69\x9f\x5f\xfb\xf2\x3b\xf9\x34\xe8\xba\xda\x2b\xfc\x5c\x51\x54\x5a\x01\x86\x7e\x16\xbf\xc0\xc3\x3e\x69\x10\x50\x26\x4d\x16\x19\x9f\x43\x02\x77\xec\xa1\x56\x26\x32\x97\x0a\xa9\x85\x90\xfe\x64\xbd\xf9\xe6\xdc\x05\xcb\x49\x69\xde\x6d\xf6\x8c\x5c\xc6\x30\xdf\x7b\x2f\x2f\x05\xa8\x28\xad\x4d\x6a\xdc\x79\x63\xa1\x31\xbc\x3b\xa1\x0c\x47\xdf\x8d\x5f\xb0\x64\x1f\xfa\x97\x3d\x0b\xe0\xd4\x82\x6e\x51\xd4\xb7\xce\x52\x15\xbf\x6f\xde\x35\x2e\x6d\xb8\x56\x7f\xdb\x86\x2c\xf4\xa1\xf7\x8b\xc2\xe5\x04\x8f\x4b\xc0\x9c\x61\x85\xf1\x2d\x02\x0c\xa8\xbe\xcb\x1d\x52\xa0\x6c\xc9\x88\xef\x08\xa8\xcc\x71\xf2\xc9\x51\x9e\xf5\xf7\xd8\xa1\x5e\x5f\x3e\x74\x3f\xda\xab\x41\x51\xb7\x6b\x4c\x9a\x4b\xb5\xdc\xf4\xe7\x97\x70\x7d\x4c\x71\xc8\x26\x6c\x71\xd4\xbe\x71\xef\x87\x9f\xaf\x1b\x02\x2a\x66\x03\x4b\x9d\xeb\x09\xfe\xb4\xad\x75\x67\x93\x43\xb9\xb7\x86\xa0\xca\x68\xdc\xff\x4c\x24\x90\xbe\x62\x76\xe2\x84\x6d\xfd\x79\xd1\x3d\x5c\xf8\x01\x11\xef\x25\x0f\x34\xb9\x60\x60\xc5\x21\x9b\xd1\xc5\xd9\x28\xae\x56\xea\xd4\xc0\x2c\x8c\xb2\x96\xe3\xa4\xda\x9f\x59\x82\x04\x3f\x29\x1f\x35\x4f\xd4\x44\xd9\xfc\x0c\xab\xb0\x57\x99\xdd\x6e\xcb\x84\xd7\xfe\xf8\x98\xa6\x15\xcb\xfd\x03\xff\xeb\xa7\xde\x84\x8b\xea\xe9\x8b\x2b\x48\x17\xed\xf2\xa2\x90\x61\xe2\x13\x52\x45\xc0\x7e\x91\x30\x59\xb8\xbd\xbb\xf5\xa0\x88\x77\x26\xeb\x9d\xba\xc8\xc2\xc7\x99\x99\x61\x20\x40\x6e\x1b\x13\x1b\xbe\xa8\x81\xb2\x11\x47\xde\xdc\x4d\xb0\x17\x1a\x39\xfa\x6a\x01\x4d\x99\x6c\xf3\x96\x09\xcc\x72\xe7\x13\x2d\xe0\x1a\x30\x5b\xdb\x60\x8b\xa1\x5e\x87\x67\xbc\x1d\xb7\x34\xe6\xfe\x7e\xe1\xf0\x92\x6a\xf4\xef\x50\x5f\x5f\xe4\x41\xe3\xef\xf2\xbb\x08\x06\x4d\x80\x48\xa2\x64\x1f\x91\xc9\xb8\xcc\x08\x0c\x1a\x37\x04\xac\x9b\x6d\x96\x55\x16\xd4\xac\x29\xc8\xd2\x95\x89\xd5\xf9\x68\xf2\xac\xb6\xd9\x14\x0d\xe9\x97\xdf\xec\x45\xd4\x5c\x77\x3e\x7f\x1b\xc5\xf1\x79\x6b\xd5\x25\xea\x47\xe0\xfa\x1a\x9b\xe9\xd2\x6b\xf5\xf9\x39\
x94\x91\xe3\x43\x5d\x9d\xad\x0b\x9c\x34\x92\x5d\x67\xa5\x30\xe8\x1c\x40\x46\xf5\x84\x51\xc8\x22\xab\x2c\x18\xd3\xac\xe3\xf5\x4e\xdb\xd2\x4e\xf7\x79\xd9\x55\x8a\x03\x64\xfc\xe7\x6b\xa2\xa9\x4a\x30\x74\xe1\xad\x05\x2e\x20\x51\x6e\xb4\x7a\x7f\xbf\xce\x32\x8e\xab\xfe\x13\x4e\xb7\xa5\xb6\xbb\x3c\x7b\xd5\xe7\xfb\xad\x82\x35\xbd\xf3\xbb\x03\xf5\x11\x2e\x2d\xcf\x3b\xcb\xed\x03\x0b\xf7\x3c\x1c\xb6\xb8\x0e\xe8\xf9\x86\xee\x19\xd5\x45\xf8\x54\x75\x09\xd4\xd6\x23\xa6\xf0\xb7\x6c\xc6\x24\x23\xf6\x33\xe2\x28\x07\x5d\x94\xbf\xe5\xde\x08\x3d\xa8\x28\xf6\x3b\x3e\x08\x87\x83\x1f\xb0\x73\x8a\x9b\x84\x66\x6c\xc0\xc1\xd8\xbf\xbb\x5d\xa5\x44\x86\xe1\x60\xda\x9c\x8f\x00\x0d\x1a\x60\x68\x23\xe6\xad\x1b\x82\xfb\x56\x2a\x64\xa7\x81\x7c\x26\xb4\x94\xa8\x28\xd9\xe9\xf1\xf2\x41\xf7\xe3\x9d\xad\x20\x59\xc4\xbe\x1d\x09\xc9\xcc\xd3\x30\xe2\xd4\x51\xaf\xf5\x58\xf1\x3e\xa6\x8f\x16\x06\x58\x38\xe0\x80\x4e\x66\xea\x64\xd2\x73\x25\xf2\xf4\x14\x23\xfe\x62\xb5\x72\x06\xd4\x57\xfe\x98\x01\xcf\xeb\xbf\x97\x33\x4d\x9d\xa2\x03\xe3\x64\x6e\x0d\xa7\x7e\x96\x20\xb7\x7d\x4a\x39\x0c\xfc\xc8\x17\x80\xa3\x9f\xb1\x43\xa6\x45\x30\x74\x32\x3b\x34\x91\x77\x45\x6c\x79\x28\x97\x1e\x8c\xfb\x27\x0e\x25\x4c\x6c\xb2\x46\xc5\x63\xa7\x16\x5d\x22\xc9\x64\x66\xb2\xd6\x26\x78\xf9\xb1\xb7\x88\xb3\x3f\xad\x52\xf3\xe9\x43\x03\x8f\x7e\xb3\xf7\x12\xb9\x5b\x8d\x5d\xc9\x7a\xd7\xf1\x05\x0d\xf5\x6c\x29\xd2\x64\x91\xe1\x3a\xc1\xd4\x3a\x42\xb1\x25\x3b\x3d\x68\x50\x0f\x92\x53\x8e\x5f\x65\x56\x4b\x66\x87\x64\x2c\x3b\x7c\x47\xae\xf0\x22\x01\xdf\xcc\x32\x1d\xbb\xca\xe7\x36\x8a\x43\x76\xe3\x32\x7e\x0a\xbc\x49\x8a\x08\xc0\x37\xd3\x0b\x3f\x9d\x5e\xe9\x37\x19\xf2\x71\x30\x2d\x46\xae\xee\x76\xc6\x4f\x22\x69\x00\x03\x88\x74\xed\x7b\xbf\x7f\x08\x47\xd7\x19\x59\xdc\x68\x67\x69\xaf\xe6\xf6\x40\x1f\x9d\x4d\x0e\xa7\xea\xca\x10\x9c\x95\x88\xa4\x64\x7b\xd7\xf1\xcc\x9a\x96\xb6\xc0\x86\x8a\x5f\xa3\x48\x67\x4d\x4c\xec\xf7\xc2\x0b\x0f\x2c\x3a\xde\xb9\x21\x01\xa3\xb0\x82\xc9\x95\x35\xa9\xb1\xf3\x40\xe9\x5e\x32\x2f\xb8\x35\x2a\xeb\xf8\xa3\xdb\x0f\x86\xe6\x69\x51\
xf6\x52\xcb\xae\x27\x19\xf4\xf2\x46\x7f\xda\xea\x9a\xf4\xf5\x32\xac\x22\x72\xdf\x48\xfd\x61\xd8\xad\xef\x4e\x33\xab\x9f\xac\x9c\x4e\x53\x88\x6b\x7d\x73\x72\xef\x47\xb9\x73\x9f\xe8\x3e\x42\x95\x02\x68\xa0\x8e\xc7\x8c\x53\x97\x57\x5d\x07\xf5\x57\x0d\x55\x22\xbe\x7f\x1b\x9a\xd9\x3a\x9a\x95\x41\x24\xf9\xa4\x4e\x18\x76\x8b\x1f\x18\x16\x4f\xf6\x65\x87\x27\x87\xfd\xf9\xad\x4e\x6a\x1b\x59\x00\x66\xb1\xe1\xc3\xf1\xee\x80\x1c\x11\xd7\x5e\xaa\xe7\x37\xac\x66\xf3\x3e\xc2\xe2\x30\x7f\x98\x6f\x20\xbe\xcd\x1c\x83\x3d\x85\x85\x03\x49\xae\x6c\x44\x92\x94\xda\xa0\x4f\xdd\xf6\xf8\x7e\xf2\x46\xb3\x6f\x84\x53\xc5\x4f\x81\x7e\x7c\xab\x2a\x8b\x2b\x55\xb9\xa7\x48\xde\xa5\x00\x89\x4f\xf3\xab\xf9\x3a\x60\x10\x10\xea\x73\xb6\xba\x67\x82\xe2\xd2\xe2\xb5\x2f\x56\xbf\x19\x55\xae\x3b\x0a\x43\xd7\x75\xcb\x82\x32\x4b\xb5\x0e\x58\x1f\xec\xcd\x14\xd3\x5f\x95\x2f\xbf\x8b\xf3\xda\x71\xfd\xb2\xd5\xe6\xaa\x0f\x2c\xf6\x0c\xf3\x49\xb3\x57\xfb\xec\x93\xa6\x5c\x98\xf8\x6b\x6f\x48\xc5\x6e\xd7\xd4\xaf\xe5\x93\x71\x40\xf8\x12\x07\x38\xd9\xf6\xbb\xfa\xe8\xc7\xca\x37\x8f\xf1\x69\x13\x6e\xb6\xef\xaa\xec\xcd\x2d\xb6\xa7\x49\xa9\xb6\xf2\x91\x97\xf6\x7a\xe5\xdd\x3c\x04\xcb\x65\x63\xda\xc2\xeb\x6e\xec\x43\xa1\xec\x37\x28\xce\xc7\xea\x8a\xfb\x6f\x2c\x3c\xee\x42\x8b\xae\xc5\x9e\x38\xdd\x13\x26\x6d\x25\xdc\x52\xab\x4e\x7f\xd5\x54\x9a\x96\x2d\xde\xb2\xc1\xd9\xcb\xdd\xdd\xfb\x51\xee\xb2\x3c\x1f\x25\xcc\x2e\x89\xe9\xca\x63\x11\x4a\x85\x99\x2b\x34\x07\x30\xf8\x9f\x7d\xe2\xb2\x3e\xed\xb5\x5b\x58\x7b\xb1\x64\x3f\xf9\x28\x24\xf3\xab\xd3\xf1\x5c\xd5\x5a\x0c\x9a\xbe\x89\x83\x3c\xdd\xd5\x23\x0a\x3e\xa7\x2b\xd3\x0d\xbf\xf5\x40\x7e\xef\xe7\x61\xb2\x13\x34\x9d\xfe\xae\x77\x3e\xc0\xf0\x28\xb9\x72\xd1\x24\xe5\x19\x02\x45\x68\x25\x92\x34\x7e\x1c\x42\xc3\x25\x08\x21\xb4\x47\x35\x0e\xe5\xf6\x39\x0e\x08\xd7\xb2\xf7\x7b\x61\x87\xdf\xc1\x28\x0e\x12\xd6\x4a\x91\x24\x5b\xf9\x94\xb2\xba\xca\x2d\x3e\xb2\xed\xa1\xab\xaa\x41\xe6\x7d\xdc\x64\xa7\x12\xc2\x9d\xa1\xc3\x2f\xba\x17\xe1\x40\xf3\x8e\x63\xfb\x74\x1f\xb6\x00\x4a\xdd\x6b\xce\x4c\x77\x18\x3f\
x34\xa2\xdd\xf3\xba\x1f\x9c\xe6\xd1\xd5\x2e\x77\x5f\x2a\xa9\x67\xd9\x7e\xc6\x03\x38\x5d\x46\xdb\x2b\x45\xa6\xf3\x8d\xc6\xbb\x84\x7d\x93\xd8\x5d\x4c\xb5\x67\x31\xe4\x4c\x99\xa6\xc3\x7f\x9e\x4f\xd2\x40\x55\xad\xc5\x14\xb2\x74\x6a\xc1\x29\x8f\x24\x42\xf1\xf4\xad\x24\xa9\xd0\xe6\x4c\x6e\xdb\x9b\x59\xe0\xfb\x68\xf2\x4c\xf5\xf4\xe7\x67\x13\xc0\x70\x3c\xcf\x76\x51\xe1\xa6\x9f\x3f\x67\x5f\x61\x92\x63\x4a\x7d\x30\x11\xd7\xfe\xb3\x03\xc4\x5f\x3f\x6a\x5b\xff\x22\xba\x5e\x72\x95\xf0\x33\xe2\x53\x5d\x08\x71\x6f\x03\xd5\x22\x94\xde\x7c\x52\xef\x8a\xc5\x83\x4e\x18\xba\x4e\xa2\x63\x01\x8d\xc0\x26\x9f\x23\x73\x25\xe5\x73\xa0\xc5\xd9\x8e\xa8\xad\xa7\xf2\x28\x6e\xbe\x2c\x3e\x3d\x1b\xba\x18\x3a\x7d\xef\xb8\xfc\xad\x96\x40\x9b\x9d\xc5\x9e\x6c\x11\x57\xce\x65\xc7\x94\x7a\x40\xa3\x84\xe2\x14\x51\x8b\xf2\xff\x7d\x08\xc6\xb5\xe3\xbf\xa8\x6d\xa1\x0b\x95\x6c\xfd\x18\x55\x8d\x39\x75\x4d\x42\xdd\xe2\x94\x38\x12\x9c\x92\xc1\xb6\x1c\x7e\x71\x21\x1a\x01\x9c\xb2\xf2\x20\x73\xad\xbd\xbc\xb4\xe5\x32\x13\x2b\xa4\x59\xc8\x0b\x7f\x1b\xd1\x1e\xcf\xb3\xa5\x6d\x2c\x7c\x5c\x67\x2f\xa7\xa6\xd6\x30\xa9\x74\xcd\xb5\x71\x96\x9d\x02\xd3\x7a\x2d\x5d\xeb\x77\xdd\x68\x27\x53\xd9\xc5\x54\xdc\xbb\x38\x9e\x31\xcc\xf7\xc9\xaf\xa6\x96\xf2\x81\x12\x87\x66\x76\x4c\x57\x91\xed\xd3\x49\x53\x7a\x09\xc3\x5c\x22\xee\x32\x8e\x1f\x4f\xdf\x3b\xa4\xa4\x4e\xc4\xee\x7d\x79\xe3\xf7\x8c\xb5\xea\x64\x44\xd6\xe7\xe5\x3e\xbd\xaf\x66\x0c\xe9\x53\xd3\x8b\x5c\x4d\x2f\x73\x25\x67\xd9\xe8\xa9\x5d\xc2\xb0\x2b\x3e\x0c\xe1\x64\x7e\x29\xf0\xce\xf8\x57\x3e\x8c\x1e\xf7\x53\xf9\x96\x53\x98\x41\x82\xf7\x0b\x37\x0e\xb2\x08\x45\xda\x00\xea\x94\xb0\xdd\xf3\x53\x4f\x65\x23\xe7\x27\x0d\xb2\x03\xe6\x88\x61\xc3\xe1\x9a\xbe\x6a\xcb\x0f\xbf\x2f\x99\x9f\x28\x39\xc2\xba\x2b\x7f\x69\x99\xc1\x23\x70\xea\xf0\xfa\x86\xc5\x5d\xf1\x13\x44\x12\xf8\xee\x3c\xa6\xe4\x90\x84\xa6\x3c\x91\xa4\x8f\xb3\xc6\x9b\xb8\x3e\xaf\x6d\x74\x57\x1a\xf5\xb3\x65\xf1\x7f\xcb\xbd\xad\xe9\xed\x30\xbe\x97\x0c\xee\x25\x6a\x82\xfe\xdb\xcb\xef\xe5\x54\xa4\xcc\xd4\x69\x27\xba\x61\x7b\x3e\xe2\x1f\
x9b\x10\x7c\x8a\x7f\x9b\x9d\x65\xec\x9f\xcc\x9c\xe3\xa6\xa7\x1e\x1f\x59\x23\xbf\xbc\xa9\x76\x36\xd8\xac\xfe\xba\x10\xd9\x69\x88\x4d\x14\xcc\x7d\x25\xe3\x88\xa4\xd5\x70\x8c\x14\xdf\xee\x4c\x59\x2b\x22\x2a\x90\x3c\x63\x11\x47\x16\xad\x76\xda\xf6\x19\xa9\xdf\x3d\xbe\x18\xba\x9e\x37\x77\x0d\x97\x05\x07\x74\x46\xbd\xd5\x51\xec\x7f\x81\xe5\x77\x20\xf1\xa8\x3f\xb7\x28\x04\x66\x74\xf8\xcc\x85\xe5\xd3\x0b\xf1\x9f\x6f\x20\xaf\xc1\xc9\x61\xdb\xf3\x7a\xf0\xe7\xbf\x6c\xa9\xd0\x8e\xd8\xaf\x19\x4e\xa8\xb2\x97\x8d\x11\x9c\xfa\xc7\xc7\x3a\x92\xe2\x2d\x02\x7f\x81\x2d\x78\x60\xf7\x29\x9f\x87\x67\x3f\x7e\x8d\x65\x93\x7a\x47\x81\x3e\xa3\xde\xbe\x3b\x76\x2c\x19\x36\x40\xf4\xbc\xae\x85\xe9\x4a\x1d\xf7\xd6\x40\x55\xf9\x51\xcd\xe8\x9d\xe8\x31\x5d\x80\x65\x39\x54\x1c\x3f\xf1\xb5\x40\x07\x2e\xb3\xdc\x18\x2a\xf3\x21\xd3\x36\xfc\xe4\xd1\x6b\xc1\xd3\x79\x4f\x84\x86\xb2\xd5\xb0\xca\xb7\xd6\x59\x30\x91\x2e\xd9\x1c\xed\x7d\x17\x70\x61\x34\xf2\xfb\x3e\x69\x8a\x20\x15\x20\xc8\x4e\x14\x7e\x31\xa8\x78\xcc\xaa\x4c\x70\xed\x15\xe2\x41\x75\xb5\x11\xf9\x25\x29\xda\xe1\x4b\x7e\xea\xd4\x7d\x3f\x87\xb0\x94\x49\xed\xbd\xb1\xbb\x85\x73\x1c\x39\xb1\xe3\xa3\x0d\x5b\x6d\xe3\x59\xd7\x42\x96\xbe\x41\x67\x70\xbf\x66\x0e\x41\x13\x79\x0a\x2b\x39\x5c\xff\x42\x99\x83\x64\xd2\xcf\xf9\xa5\xda\x65\xc4\xfe\xfc\x6a\x82\x48\x6c\x4c\xb7\x7f\x0f\x03\x32\xd6\x74\xe4\x41\xbe\xdc\xa7\xcc\x2b\xcc\x15\x1f\x52\xd8\x12\x6d\xf9\x32\x3b\x95\xc6\x70\xd7\x67\x05\xfa\x71\x4c\x73\xb5\x61\x5b\xa9\x53\x3f\x78\x52\xb9\x02\x21\x7d\xc8\x33\x77\x32\xb7\xdc\x3c\x48\xad\x7a\xee\xc9\x78\x8f\x80\x14\x09\x2e\xa2\x16\xfd\x6c\xb0\x36\x9b\x23\xab\x0c\x13\xd9\xc9\x34\xf7\xba\x67\xc6\x27\x7a\xc9\x8c\xe5\x99\x87\xab\xfd\xf9\x2e\xa9\x74\x3b\x75\x36\x0a\x5d\x98\x3e\x77\xe0\xef\xd8\x0e\xe6\x02\xb9\xd1\xea\xa7\x5c\x0b\x1a\x37\x1e\xfe\x79\x54\x2a\xef\xfe\xb6\x6a\x57\xa5\x3c\x32\x5e\x6e\xe2\xe8\x9f\xfc\x7a\xee\xad\x59\xfb\xd3\x76\x3e\x25\xc1\x18\x54\x91\xbd\x0d\x03\xbe\xc3\x8f\x48\xb2\x3a\x84\x6f\xd3\x24\xfc\xaf\x48\xfb\x74\x09\xb7\x7b\x1c\x1c\xed\xfe\xf7\x2e\x97\
xf3\x29\xb2\x45\x74\xa9\x5d\xa9\x34\x03\x58\xea\x7f\x70\x71\x2c\x2d\x92\x88\x93\xe6\x7b\xec\xb0\x3e\x1d\x46\x2a\x23\x7a\x71\x70\x55\xce\xd6\x44\x59\x2b\xa0\x7b\xe7\x53\x6d\x57\x55\x1a\x46\xb5\xda\xb5\xed\x3b\x96\x6a\x1c\xf9\x6c\x73\x8c\x9a\x55\xbf\xee\x40\x1a\x83\x46\x47\x50\x5e\x9f\x16\xf7\x6b\x67\x8d\x49\x31\xac\xe3\x82\xa6\x38\x3d\xcb\x35\xb0\x91\xda\xe6\xf4\x95\xa7\x9b\x75\x0d\x8c\x53\x0a\x33\x8e\xe5\x9f\x75\xfd\x3c\x7b\x7b\x1d\xac\x1c\xd1\x82\xa3\xbd\x0e\x4e\xe8\x83\x06\xba\xe2\xdc\xa0\x02\x58\xf9\xee\xaf\x90\x97\x2c\xd2\x0f\x07\x9c\xbf\x5f\x8d\xc4\xc9\xd7\xb3\x41\x7d\x8e\x3f\xda\xa4\x7c\x60\x80\x51\x98\x09\xad\x8c\x2e\xbf\x94\x73\xd0\x68\xd6\x93\x0f\xd9\xc0\x9e\xa0\x71\xa0\xf9\x41\x91\xae\x86\x3f\xe8\x35\x27\x7d\xc8\xda\xe5\x64\x3c\x68\x2a\x8a\x50\x1b\x97\x6f\x21\x00\x67\x5b\x4e\xbc\xd8\xff\xfc\x05\x07\x7e\xac\xcd\x33\x02\x6c\xd2\x3e\x67\xd3\x9f\xfb\x79\xea\xc0\x57\xc1\xdb\x54\x91\x78\xba\xc5\xce\xf8\x2e\xd2\x43\xaf\x61\x9c\x7d\xd8\x62\xb9\xbb\x92\xf1\xc2\xdc\xfe\xd5\xbc\x0f\x2e\x6d\xdb\x01\x00\xd5\xaa\xa7\x7f\x82\xa1\xca\xb6\xbb\x5f\x66\x61\x4d\x31\xfa\x97\xdc\x47\x3f\x5d\x63\x6c\xe5\xe7\x2a\xfb\x89\xdf\xa3\xfd\xa6\x43\x33\x8b\x6b\xfd\xfd\xfa\x8f\x1e\x2f\x65\x91\x66\x6e\xf5\xf6\xcb\x2c\xd4\xaa\x52\x14\x5f\x9c\xa9\x86\xa3\x3b\x9c\x7b\xc2\xa2\x6f\xc4\xa9\x97\x3c\x7a\xcf\x01\xf9\x2e\x97\x88\xe2\x9d\xea\x1e\xc1\xc1\x33\x4f\x55\xb7\x45\xd8\x31\xb1\xdf\x15\x76\x54\x55\x30\x5d\xa5\xb2\xea\xa8\x12\x39\x06\x30\xae\x1e\xcc\x91\xbb\x99\x8c\x4d\x59\x0f\x52\xfe\xdb\xb0\x58\xf0\x2b\x0c\x1f\xc2\xb3\xe2\x79\xfd\x4f\xf3\x02\xd1\x88\x7f\x26\x09\xe6\x85\xe1\x38\x33\xeb\x3a\x5e\x28\xe9\xa6\xd1\x7c\x7b\xdc\xf5\xbb\x38\x5b\x83\xa0\x2b\x35\x2c\xf8\xb4\xc8\x67\x4c\x40\xf4\xb0\x1f\x23\x78\xe6\x69\xf2\x96\x34\xf2\x9f\x4e\x08\x88\xe9\x72\x35\x0c\xc0\x60\x2b\x2f\x16\x09\xc9\x36\x6a\xc6\x8e\xbf\x70\x68\x78\xc9\x29\x9b\x49\x57\xc7\xce\x03\x36\xfe\x81\x03\xf2\xf5\xbf\x9d\x67\xe4\x46\xe1\xe8\x8e\x1f\x79\x61\xdd\xc7\xbf\x40\xa6\x2d\x4b\x44\x92\x95\x62\x5f\xce\xb4\xd3\xff\x19\xc2\xd0\x41\x5c\
x2e\x78\x1f\x24\x64\x9a\x97\x2a\x09\x36\x52\x0e\x5e\x84\xe8\xfa\x8d\x77\xb3\xf7\xb8\x95\x99\x9c\x1a\x5c\x35\x58\xdc\xdf\x12\xfa\xb0\x1c\x7f\xc7\x4d\x99\xd9\xc2\xb2\xca\x6a\xce\x30\x60\xe5\x16\x1c\x5d\xe5\x7d\x46\x23\x56\xb5\x5f\x00\xaa\x5c\x35\x73\x74\x9e\x69\x8f\xbb\x90\x93\x32\x5b\x1b\x87\xc7\xfb\x58\xd3\x83\xdd\x73\xd4\x28\xd2\xdf\x6e\x87\x71\x62\x1a\x4f\x4b\x2d\x2b\x94\x54\x4d\x5b\xcc\x69\x26\x60\x48\x76\x14\x8e\x56\x56\xd3\x76\xb6\xdb\x99\x90\xc1\x60\x87\x24\xc9\x4e\xdc\xf1\xc7\xf0\x72\x6f\x6b\xb6\xac\xf7\x99\x17\x5c\xd3\x86\xfb\xe7\x83\x37\xab\x8a\xc6\x36\xa1\xc1\x1e\x6a\xed\x2c\x4a\x7a\xf8\x61\xe5\x66\x6c\x38\x77\xd0\xa6\xaa\x8d\xcf\x28\x8b\x45\x16\xef\x5c\x3f\xbe\x64\x67\x5b\x70\xa4\x6c\x32\x5f\x4d\xd2\x40\x6d\xb7\xc3\x34\xf1\x21\x71\x1d\xac\xf4\x89\x79\x56\xe8\x54\x2a\x33\x14\x0c\x94\xb5\xb7\x05\x18\xa3\xae\xf7\x9c\xb6\x32\x8e\x2d\xd4\x34\x8e\x4d\xce\x6a\xb0\x97\xcf\xca\x2b\x40\x90\x45\x06\x7d\x76\x8f\xa5\x05\x2d\xc7\x76\xd5\x4a\x3d\xe1\x95\x8b\xe0\x56\x13\xfa\x61\x84\xc1\x56\x78\x39\x6a\xa0\x50\xc6\xa6\xc9\xb0\xff\xd0\xd0\x39\x0d\x7e\xcd\x81\xe1\xd3\x22\xd7\x99\x29\xa5\x4a\x7a\x86\x08\x22\xc9\xbc\x7d\xdd\x8c\xf6\x09\x4b\x22\x41\x47\x25\x4c\x97\x44\xa9\x2e\xbe\xed\x6c\x3c\x02\xf8\x2e\x4e\x24\x15\xd9\xee\x43\x0e\xa2\xaf\xd7\x90\xa0\x41\x65\x22\x1d\x7a\xce\x20\x00\x83\xb2\xa1\x87\x64\xaa\x59\xfe\x19\xa4\x28\xab\x4d\xff\x7f\xb2\x66\x7b\xe7\x0e\xe5\x70\xbb\x91\x33\xd9\xdb\x87\xb3\x91\xb9\x4f\x4e\x27\x5e\x29\xcc\x78\xfa\x3d\x67\x94\x75\x38\x21\x04\x5b\x1f\xf7\xc6\xfa\xd9\xe4\x96\x98\xcc\xea\x04\x4f\x2a\x75\x9d\x70\x49\x72\x4b\xbf\xdb\xaf\x76\xa9\x10\x48\x5d\xfe\xc9\x63\x41\x5d\x5f\x0d\xb3\x59\x13\x8d\xcd\x45\x2f\xb2\xbc\x1c\x72\x5d\x62\xb3\x1a\x71\xca\x98\x8b\xcf\xc2\xd5\x76\x1e\x5f\x70\x7d\xf8\x3c\x08\x98\x75\xbd\x48\x7d\xf8\x28\x8b\xdb\xdd\xf0\x1f\xc7\x73\x30\x74\x0a\x33\x24\x53\x58\x38\x5c\x41\xfb\xaa\xde\x58\x1c\x24\xcb\xb4\x9c\x99\x10\xec\x96\x39\x98\xfe\x8b\x88\xab\x65\x26\x3b\xf5\xc1\xff\xf6\xb9\x8c\xcb\xaa\xec\xfa\x05\x20\x66\x4b\x38\x2d\x81\xa6\xe5\xd1\
xdd\x73\x6d\xfe\xd1\x43\xff\x6b\x6a\x62\x3b\x21\x37\x0a\xa7\x78\x73\x61\xba\xf2\x90\xa0\xb9\x38\xa6\x2b\xc3\x2e\xff\x1b\x58\xa0\x4c\xe6\x2a\x43\xe0\xd3\xfc\xfe\x99\x11\xa1\xb8\x33\xfe\x3f\xaf\x59\xff\x03\x96\xfc\xf1\xcb\x7f\x47\x91\xdf\x5f\xe1\x5c\x8f\x04\x68\x29\xd3\xc1\xf9\x32\xa0\xb4\x61\x10\x80\x61\x81\x56\xa6\x19\x0f\x2e\x9f\xaa\x94\xfe\xd7\xdf\x13\x89\xa1\x9e\xdb\x0d\xb2\x2b\xbb\x44\x52\x2f\x07\xe4\xbb\x82\x04\xbb\x95\x30\x25\x9c\xc6\x7b\xbf\x29\x69\x5a\xb4\x8f\x0f\xd3\x95\x4a\x23\x48\x38\x90\xf2\x3e\xfd\x0b\x12\x8c\x0b\x23\x92\x92\xff\x0a\xda\xde\xaf\x8c\xfc\x97\x40\xe4\x9a\xaa\x7d\x7d\x3c\x3d\xbe\x43\x01\x06\x88\xff\x73\x94\xe5\xa0\xc1\xac\x9d\xef\xda\x11\x26\xcb\x5f\x6e\xbb\x86\xc2\xd0\xcf\x84\x0e\xca\x93\x34\xff\x1b\x5c\x40\x5e\x3a\xad\x44\xfa\x0f\xe5\x53\xfc\x40\xfe\x71\xc4\xd0\xea\x97\xf2\x4e\xd8\xbf\xae\xc3\xca\x08\x90\x9d\xb8\xd3\xc9\xeb\x53\x5b\xb1\xf7\x3d\x0b\x4f\x5b\xd9\x31\xfc\x7d\x2d\xac\x50\xd2\x1c\x0c\x5d\xf5\xe8\x4b\x7f\x52\x79\x14\x3d\x9e\xf7\x9f\x7b\xb5\xb4\x43\xfe\xfd\x1d\x1c\xdd\xd5\xb6\xeb\xd7\x1b\x11\x64\x91\x71\x18\xbe\xc8\x91\x48\x32\xa0\x9d\xeb\x83\x10\x33\x07\xa3\xde\x68\x6b\x0c\x3f\x12\x58\x61\xfb\xb7\xfe\x73\x77\x6c\xb0\xd7\x6c\x1b\x36\x1c\xb8\xd2\xf1\xa9\x26\xdc\x98\xdc\xea\xb1\xdc\x6a\xe1\x6d\x7d\x3b\x99\xbd\xa6\xcd\xb2\x48\xa7\xdd\x63\x59\xc1\xeb\x09\xcf\x17\xd7\xa6\x02\xe5\x60\x60\x85\x6d\xd6\xb5\xc8\x71\xd6\xb5\xc3\x6f\xd6\xb5\x82\x2f\x3e\xf2\xd8\xd5\x9e\xfd\x95\x62\x56\x32\x3d\x5e\x64\x3d\xb6\x0b\x86\x4e\x63\x87\x4c\x59\x9c\x9a\x50\x7d\xb6\xad\x08\x90\xb6\x91\xff\xf1\xdf\xc6\x22\x7e\x35\x71\x2e\x57\x99\x9c\x52\x6a\xfb\xf7\xda\xac\x12\xa6\xc4\x14\x7a\x1d\xd3\x3d\xce\x59\xcc\x47\x77\xf0\xc5\xfc\x95\xc6\xf2\x94\x96\x3b\x73\xdb\xe3\xb6\x86\xa0\xf6\x1a\x47\x26\xbf\x3f\x1b\xe7\x6d\xe8\xc7\x05\xd3\xfd\xfe\x6c\x5c\xc9\x2f\x5e\xf2\xe2\x50\x50\x3b\x3f\xfe\xc7\xb9\x2a\xf8\xaa\x03\xfe\xf2\xac\x6b\x4e\x61\xca\x57\x29\x49\x4d\x63\x6c\x01\x7f\x89\x2a\x9a\xbd\xf4\x81\xea\x19\x3a\x30\x63\x84\xfe\x60\x36\x5b\xc0\xef\x0e\x4d\xbc\x89\xb9\x49\xd4\
xfe\x3b\x49\x3d\x58\xf3\x0d\x7a\x40\xfe\x85\x70\x2c\x4a\x34\xf1\x95\x1c\x65\x6a\x71\x22\x9d\xa4\x7f\xdf\x1d\x90\x19\xc5\x37\x17\x54\x53\xd5\x95\xaf\x26\x7a\x70\x94\x9c\x98\x75\x15\xf1\xe2\x51\x45\xa7\xb0\xaa\xa2\xd3\x3c\xc1\x0d\x30\xe9\x55\x4c\xd2\xc6\x8b\x37\xa7\x82\xad\x67\x5d\xad\x16\xa0\x7e\xd3\x0e\xa8\x3f\x69\x4e\x07\x6b\xec\xbe\x7a\x97\x25\xe3\x49\xf2\xf3\x28\x2e\x76\x03\x49\xc3\x21\xc9\x60\x40\x9c\x37\x98\x86\x36\xeb\x5a\xf7\x26\xe6\xf6\xda\x8b\x37\x77\x2c\x04\xa1\x7e\xfd\x79\x9e\xd4\x9c\x42\x91\xfe\x87\x6f\xb3\x86\xcc\x38\xea\xee\xc6\x0c\xda\xf4\x3d\x88\x60\xf9\xf7\xdb\xd4\x61\x06\xb3\x82\xf1\x39\x01\xcd\x79\x88\x52\x5f\xf3\x57\xf4\x7f\x37\x02\x2d\xde\x75\x56\xca\x05\x4c\x9b\x7e\xbb\xe5\xb1\xd7\xff\x29\x65\xa0\x14\x0a\x1a\x71\x98\x97\x9a\xf7\xc5\xb4\x61\xc3\x71\x30\x70\x8a\xff\x1f\xa8\xfb\x47\x01\x63\xd5\xdb\x3e\x1f\xa8\xcb\xfb\x08\x18\x6c\x7f\x3f\x54\x83\xb7\x62\xdc\x6a\xbb\x0a\x06\x83\xc1\xf4\xb4\x8d\xb4\xca\x34\xae\x45\xfc\xbf\x00\x00\x00\xff\xff\x13\xa6\x7f\x55\x9f\x30\x00\x00")
func init() | {
rb := bytes.NewReader(FileAssetsAwsDatabaseDatabaseMigrationServicePng)
r, err := gzip.NewReader(rb)
if err != nil {
panic(err)
}
err = r.Close()
if err != nil {
panic(err)
}
f, err := FS.OpenFile(CTX, "assets/aws/database/database-migration-service.png", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777)
if err != nil {
panic(err)
}
_, err = io.Copy(f, r)
if err != nil {
panic(err)
}
err = f.Close()
if err != nil {
panic(err)
}
} |
|
manager.go | // Code generated by mockery v1.0.0. DO NOT EDIT.
package blob
import (
context "context"
blobmodels "github.com/goharbor/harbor/src/pkg/blob/models"
mock "github.com/stretchr/testify/mock"
models "github.com/goharbor/harbor/src/common/models"
)
// Manager is an autogenerated mock type for the Manager type
type Manager struct {
mock.Mock
}
// AssociateWithArtifact provides a mock function with given fields: ctx, blobDigest, artifactDigest
func (_m *Manager) AssociateWithArtifact(ctx context.Context, blobDigest string, artifactDigest string) (int64, error) {
ret := _m.Called(ctx, blobDigest, artifactDigest)
var r0 int64
if rf, ok := ret.Get(0).(func(context.Context, string, string) int64); ok {
r0 = rf(ctx, blobDigest, artifactDigest)
} else {
r0 = ret.Get(0).(int64)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
r1 = rf(ctx, blobDigest, artifactDigest)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// AssociateWithProject provides a mock function with given fields: ctx, blobID, projectID
func (_m *Manager) AssociateWithProject(ctx context.Context, blobID int64, projectID int64) (int64, error) {
ret := _m.Called(ctx, blobID, projectID)
var r0 int64
if rf, ok := ret.Get(0).(func(context.Context, int64, int64) int64); ok {
r0 = rf(ctx, blobID, projectID)
} else {
r0 = ret.Get(0).(int64)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, int64, int64) error); ok {
r1 = rf(ctx, blobID, projectID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CleanupAssociationsForArtifact provides a mock function with given fields: ctx, artifactDigest
func (_m *Manager) CleanupAssociationsForArtifact(ctx context.Context, artifactDigest string) error {
ret := _m.Called(ctx, artifactDigest)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, artifactDigest)
} else {
r0 = ret.Error(0)
}
return r0
}
// CleanupAssociationsForProject provides a mock function with given fields: ctx, projectID, blobs
func (_m *Manager) CleanupAssociationsForProject(ctx context.Context, projectID int64, blobs []*models.Blob) error {
ret := _m.Called(ctx, projectID, blobs)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64, []*models.Blob) error); ok {
r0 = rf(ctx, projectID, blobs)
} else {
r0 = ret.Error(0)
}
return r0
}
// Create provides a mock function with given fields: ctx, digest, contentType, size
func (_m *Manager) Create(ctx context.Context, digest string, contentType string, size int64) (int64, error) {
ret := _m.Called(ctx, digest, contentType, size)
var r0 int64
if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) int64); ok | else {
r0 = ret.Get(0).(int64)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok {
r1 = rf(ctx, digest, contentType, size)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Get provides a mock function with given fields: ctx, digest
func (_m *Manager) Get(ctx context.Context, digest string) (*models.Blob, error) {
ret := _m.Called(ctx, digest)
var r0 *models.Blob
if rf, ok := ret.Get(0).(func(context.Context, string) *models.Blob); ok {
r0 = rf(ctx, digest)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*models.Blob)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, digest)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IsAssociatedWithArtifact provides a mock function with given fields: ctx, blobDigest, artifactDigest
func (_m *Manager) IsAssociatedWithArtifact(ctx context.Context, blobDigest string, artifactDigest string) (bool, error) {
ret := _m.Called(ctx, blobDigest, artifactDigest)
var r0 bool
if rf, ok := ret.Get(0).(func(context.Context, string, string) bool); ok {
r0 = rf(ctx, blobDigest, artifactDigest)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
r1 = rf(ctx, blobDigest, artifactDigest)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// IsAssociatedWithProject provides a mock function with given fields: ctx, digest, projectID
func (_m *Manager) IsAssociatedWithProject(ctx context.Context, digest string, projectID int64) (bool, error) {
ret := _m.Called(ctx, digest, projectID)
var r0 bool
if rf, ok := ret.Get(0).(func(context.Context, string, int64) bool); ok {
r0 = rf(ctx, digest, projectID)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, string, int64) error); ok {
r1 = rf(ctx, digest, projectID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// List provides a mock function with given fields: ctx, params
func (_m *Manager) List(ctx context.Context, params blobmodels.ListParams) ([]*models.Blob, error) {
ret := _m.Called(ctx, params)
var r0 []*models.Blob
if rf, ok := ret.Get(0).(func(context.Context, blobmodels.ListParams) []*models.Blob); ok {
r0 = rf(ctx, params)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*models.Blob)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, blobmodels.ListParams) error); ok {
r1 = rf(ctx, params)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Update provides a mock function with given fields: ctx, _a1
func (_m *Manager) Update(ctx context.Context, _a1 *models.Blob) error {
ret := _m.Called(ctx, _a1)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *models.Blob) error); ok {
r0 = rf(ctx, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}
| {
r0 = rf(ctx, digest, contentType, size)
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.