file_name | prefix | suffix | middle
---|---|---|---|
layout.rs
|
pub use core::alloc::Layout;
use core::{
convert::{TryFrom, TryInto},
fmt,
mem,
num::NonZeroUsize,
};
/// The parameters given to `Layout::from_size_align` or some other `Layout` constructor do not
/// satisfy its documented constraints.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct LayoutErr {
private: (),
}
impl From<core::alloc::LayoutErr> for LayoutErr {
#[must_use]
fn from(_: core::alloc::LayoutErr) -> Self {
Self { private: () }
}
}
impl fmt::Display for LayoutErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("invalid parameters to Layout::from_size_align")
}
}
/// Non-zero Layout of a block of memory.
///
/// An instance of `NonZeroLayout` describes a particular layout of memory. You build a
/// `NonZeroLayout` up as an input to give to an allocator.
///
/// All layouts have an associated positive (non-zero) size and a power-of-two alignment.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct NonZeroLayout {
// size of the requested block of memory, measured in bytes.
size: NonZeroUsize,
// alignment of the requested block of memory, measured in bytes.
// we ensure that this is always a power of two, because APIs
// like `posix_memalign` require it and it is a reasonable
// constraint to impose on Layout constructors.
//
// (However, we do not analogously require `align >= sizeof(void*)`,
// even though that is *also* a requirement of `posix_memalign`.)
align: NonZeroUsize,
}
impl NonZeroLayout {
/// Constructs a `NonZeroLayout` from a given `size` and `align`,
/// or returns `LayoutErr` if any of the following conditions
/// is not met:
///
/// * `align` must not be zero,
/// * `align` must be a power of two,
/// * `size` must not be zero,
/// * `size`, when rounded up to the nearest multiple of `align`, must not overflow (i.e., the
/// rounded value must be less than `usize::MAX`).
#[inline]
pub fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutErr> {
Layout::from_size_align(size, align)?.try_into()
}
/// Creates a layout, bypassing all checks.
///
/// # Safety
///
/// This function is unsafe as it does not verify the preconditions from
/// [`NonZeroLayout::from_size_align`][].
#[inline]
#[must_use]
pub const unsafe fn from_size_align_unchecked(size: NonZeroUsize, align: NonZeroUsize) -> Self {
Self { size, align }
}
/// The minimum size in bytes for a memory block of this layout.
#[inline]
#[must_use]
pub const fn size(&self) -> NonZeroUsize {
self.size
}
/// The minimum byte alignment for a memory block of this layout.
#[inline]
#[must_use]
pub const fn align(&self) -> NonZeroUsize {
self.align
}
/// Constructs a `NonZeroLayout` suitable for holding a value of type `T`.
///
/// Returns `Err` if `T` is a ZST.
#[inline]
pub fn new<T>() -> Result<Self, LayoutErr> {
Layout::new::<T>().try_into()
}
/// Constructs a `NonZeroLayout` suitable for holding a value of type `T`.
///
/// # Safety
///
/// This function is unsafe as it does not verify the preconditions from
/// [`NonZeroLayout::new`][].
#[inline]
#[must_use]
pub const unsafe fn new_unchecked<T>() -> Self {
Self::from_size_align_unchecked(
NonZeroUsize::new_unchecked(mem::size_of::<T>()),
NonZeroUsize::new_unchecked(mem::align_of::<T>()),
)
}
/// Produces a layout describing a record that could be used to allocate a backing structure
/// for `T` (which could be a trait or other unsized type like a slice).
///
/// Returns `None` if `T` is a ZST.
#[inline]
pub fn for_value<T: ?Sized>(t: &T) -> Option<Self> {
Layout::for_value(t).try_into().ok()
}
/// Returns the amount of padding we must insert after `self` to ensure that the following
/// address will satisfy `align` (measured in bytes).
///
/// e.g., if `self.size()` is 9, then `self.padding_needed_for(4)` returns 3, because that is
/// the minimum number of bytes of padding required to get a 4-aligned address (assuming
/// that the corresponding memory block starts at a 4-aligned address).
///
/// The return value of this function has no meaning if `align` is not a power-of-two.
///
/// Note that the utility of the returned value requires `align` to be less than or equal to the
/// alignment of the starting address for the whole allocated block of memory. One way to
/// satisfy this constraint is to ensure `align <= self.align()`.
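///
/// A doctest-style sketch (illustrative only; assumes this module's types are in scope):
///
/// ```ignore
/// let layout = NonZeroLayout::from_size_align(9, 4).unwrap();
/// let align = core::num::NonZeroUsize::new(4).unwrap();
/// // (9 + 4 - 1) & !(4 - 1) == 12, so 3 bytes of padding are needed.
/// assert_eq!(layout.padding_needed_for(align), 3);
/// ```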
#[inline]
#[must_use]
pub const fn padding_needed_for(&self, align: NonZeroUsize) -> usize {
// Rounded up value is:
// len_rounded_up = (len + align - 1) & !(align - 1);
// and then we return the padding difference: `len_rounded_up - len`.
//
// We use modular arithmetic throughout:
//
// 1. align is guaranteed to be > 0, so align - 1 is always valid.
//
// 2. `len + align - 1` can overflow by at most `align - 1`, so the &-mask with
// `!(align - 1)` will ensure that in the case of overflow, `len_rounded_up` will
// itself be 0. Thus the returned padding, when added to `len`, yields 0, which
// trivially satisfies the alignment `align`.
//
// (Of course, attempts to allocate blocks of memory whose size and padding overflow in the
// above manner should cause the allocator to yield an error anyway.)
let len = self.size().get();
let align = align.get();
let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
len_rounded_up.wrapping_sub(len)
}
/// Produces a layout describing a record that could be used to allocate a backing structure
/// for `T` (which could be a trait or other unsized type like a slice).
///
/// # Safety
///
/// This function is unsafe as it does not verify the preconditions from
/// [`NonZeroLayout::for_value`][].
#[inline]
pub unsafe fn for_value_unchecked<T: ?Sized>(t: &T) -> Self {
debug_assert_ne!(mem::size_of_val(t), 0);
debug_assert_ne!(mem::align_of_val(t), 0);
Self::from_size_align_unchecked(
NonZeroUsize::new_unchecked(mem::size_of_val(t)),
NonZeroUsize::new_unchecked(mem::align_of_val(t)),
)
}
/// Creates a layout describing the record for `n` instances of `self`, with a suitable amount
/// of padding between each to ensure that each instance is given its requested size and
/// alignment. On success, returns `(k, offs)` where `k` is the layout of the array and
/// `offs` is the distance between the start of each element in the array.
///
/// On arithmetic overflow, returns `LayoutErr`.
#[inline]
pub fn repeat(&self, n: NonZeroUsize) -> Result<(Self, NonZeroUsize), LayoutErr>
|
/// Creates a layout describing the record for a `[T; n]`.
///
/// On arithmetic overflow, returns `LayoutErr`.
#[inline]
pub fn array<T>(n: NonZeroUsize) -> Result<Self, LayoutErr> {
Self::new::<T>()?.repeat(n).map(|(k, offs)| {
debug_assert_eq!(offs.get(), mem::size_of::<T>());
k
})
}
}
impl Into<Layout> for NonZeroLayout {
#[must_use]
fn into(self) -> Layout {
let size = self.size().get();
let align = self.align().get();
debug_assert!(Layout::from_size_align(size, align).is_ok());
unsafe { Layout::from_size_align_unchecked(size, align) }
}
}
impl TryFrom<Layout> for NonZeroLayout {
type Error = LayoutErr;
fn try_from(layout: Layout) -> Result<Self, Self::Error> {
unsafe {
debug_assert_ne!(layout.align(), 0);
Ok(Self::from_size_align_unchecked(
NonZeroUsize::new(layout.size()).ok_or(LayoutErr { private: () })?,
NonZeroUsize::new_unchecked(layout.align()),
))
}
}
}
|
{
let padded_size = self
.size()
.get()
.checked_add(self.padding_needed_for(self.align()))
.ok_or(LayoutErr { private: () })?;
let alloc_size = padded_size
.checked_mul(n.get())
.ok_or(LayoutErr { private: () })?;
unsafe {
// self.align is already known to be valid and alloc_size has been
// padded already.
debug_assert_ne!(alloc_size, 0);
debug_assert_ne!(padded_size, 0);
Ok((
Self::from_size_align_unchecked(
NonZeroUsize::new_unchecked(alloc_size),
self.align(),
),
NonZeroUsize::new_unchecked(padded_size),
))
}
}
|
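A minimal sketch of how the `NonZeroLayout` API above might be exercised; `demo` is a hypothetical caller, and the arithmetic mirrors `padding_needed_for` and `repeat`:

```rust
use core::num::NonZeroUsize;

fn demo() -> Result<(), LayoutErr> {
    // A 9-byte block aligned to 4 bytes.
    let layout = NonZeroLayout::from_size_align(9, 4)?;
    // Padding to the next multiple of the alignment: (9 + 3) & !3 == 12, so 3 bytes.
    assert_eq!(layout.padding_needed_for(layout.align()), 3);
    // Three padded elements: a stride of 12 bytes and a total size of 36 bytes.
    let (array, stride) = layout.repeat(NonZeroUsize::new(3).unwrap())?;
    assert_eq!(stride.get(), 12);
    assert_eq!(array.size().get(), 36);
    Ok(())
}
```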
smtp.go
|
package template
import (
"fmt"
"os"
"strings"
"github.com/jessevdk/go-flags"
. "github.com/lkpdn/plumeria-cli/axapi/util"
. "github.com/lkpdn/plumeria-cli/axapi/v21/auth"
. "github.com/lkpdn/plumeria-cli/axapi/v21/util"
)
type Smtp struct{}
func (s *Smtp) Help() string {
return `
$ ... slb.template.smtp <subcommand>
To see options for each <subcommand>, try
$ ... slb.template.smtp <subcommand> --help
Available Subcommands:
getAll : "slb.template.smtp.getAll method."
search : "slb.template.smtp.search method."
`
}
func (s *Smtp) Synopsis() string {
return "slb.template.smtp method."
}
func (s *Smtp) Run(args []string) int {
if len(args) == 0 {
fmt.Println(s.Help())
os.Exit(0)
}
var res string
switch args[0] {
case "getAll":
res, _ = s.GetAll(args)
case "search":
res, _ = s.Search(args)
}
fmt.Println(res)
return 0
}
/**
* AXAPI(2.1) Method: Slb.Template.Smtp.GetAll
*/
func (s *Smtp) GetAll(args []string) (string, error) {
// options
var opts GlobalFlags
// parse
// copy os.Args so that restoring it below actually undoes the mutation
old_args := append([]string(nil), os.Args...)
os.Args[0] = strings.Join(os.Args[:(len(os.Args)-len(args)+1)], " ")
_, err := flags.Parse(&opts)
if err != nil {
os.Exit(1)
}
os.Args = old_args
if opts.SessionId == "" {
opts.SessionId = IssueSessionId()
}
// dispatch
item := CreateUrlValues(map[string]string{
"method": "slb.template.smtp.getAll",
"session_id": opts.SessionId,
})
req := CreateRequest()
req.QueryString = item
res, err := Dispatch(req)
return res, err
}
/**
* AXAPI(2.1) Method: Slb.Template.Smtp.Search
*/
|
func (s *Smtp) Search(args []string) (string, error) {
// options
var opts struct {
GlobalFlags
Name string `long:"name" description:"name of slb.template.smtp"`
}
// parse
// copy os.Args so that restoring it below actually undoes the mutation
old_args := append([]string(nil), os.Args...)
os.Args[0] = strings.Join(os.Args[:(len(os.Args)-len(args)+1)], " ")
_, err := flags.Parse(&opts)
if err != nil {
os.Exit(1)
}
os.Args = old_args
if opts.SessionId == "" {
opts.SessionId = IssueSessionId()
}
// validate
if opts.Name == "" {
fmt.Println(s.Help())
os.Exit(1)
}
// dispatch
item := CreateUrlValues(map[string]string{
"method": "slb.template.smtp.search",
"session_id": opts.SessionId,
"name": opts.Name,
})
req := CreateRequest()
req.QueryString = item
res, err := Dispatch(req)
return res, err
}
| |
washer_dryer.py
|
import logging
from typing import List
from homeassistant.helpers.entity import Entity
from gehomesdk import ErdCode, ErdApplianceType
from .washer import WasherApi
from .dryer import DryerApi
from ..entities import GeErdSensor, GeErdBinarySensor
_LOGGER = logging.getLogger(__name__)
class WasherDryerApi(WasherApi, DryerApi):
"""API class for washer/dryer objects"""
APPLIANCE_TYPE = ErdApplianceType.COMBINATION_WASHER_DRYER
def
|
(self) -> List[Entity]:
base_entities = self.get_base_entities()
common_entities = [
GeErdSensor(self, ErdCode.LAUNDRY_MACHINE_STATE),
GeErdSensor(self, ErdCode.LAUNDRY_CYCLE),
GeErdSensor(self, ErdCode.LAUNDRY_SUB_CYCLE),
GeErdBinarySensor(self, ErdCode.LAUNDRY_END_OF_CYCLE),
GeErdSensor(self, ErdCode.LAUNDRY_TIME_REMAINING),
GeErdSensor(self, ErdCode.LAUNDRY_DELAY_TIME_REMAINING),
GeErdBinarySensor(self, ErdCode.LAUNDRY_DOOR),
GeErdBinarySensor(self, ErdCode.LAUNDRY_REMOTE_STATUS),
]
washer_entities = self.get_washer_entities()
dryer_entities = self.get_dryer_entities()
entities = base_entities + common_entities + washer_entities + dryer_entities
return entities
|
get_all_entities
|
publish.rs
|
use crate::{
commands::Run,
globals::{TERM_ERR, TERM_OUT},
utils,
};
use anyhow::{bail, Context, Result};
use dialoguer::{Input, PasswordInput};
use indicatif::ProgressBar;
use manifest::Manifest;
use reqwest::{
blocking::{
multipart::{Form, Part},
ClientBuilder,
},
StatusCode,
};
use std::{
fs::{self, File},
io::Cursor,
path::PathBuf,
};
use structopt::StructOpt;
/// BeatMods1 categories (legacy)
static BM1_CATEGORIES: &[&str] = &[
"Other",
"Core",
"Cosmetic",
"Practice / Training",
"Gameplay",
"Stream Tools",
"Libraries",
"UI Enhancements",
"Lighting",
"Tweaks / Tools",
"Multiplayer",
"Text Changes",
];
/// Publish command options
#[derive(StructOpt, Debug)]
pub struct
|
{
/// File to publish
#[structopt(name = "FILE")]
file: Option<PathBuf>,
/// BeatMods1 category (legacy)
#[structopt(short, long, name = "CATEGORY", default_value = "Other")]
category: String,
/// Lists BeatMods1 categories (legacy)
#[structopt(short, long)]
list_categories: bool,
/// BeatMods1 user (legacy)
#[structopt(short, long, name = "USER")]
user: Option<String>,
/// BeatMods1 password (legacy)
#[structopt(short, long, name = "PASSWORD")]
password: Option<String>,
}
impl Run for Publish {
fn run(self, verbose: bool) -> Result<()> {
if self.list_categories {
TERM_OUT.write_line(&BM1_CATEGORIES.join("\n"))?;
return Ok(());
}
let manifest = read_manifest()?;
manifest.validate()?;
run_commands(&manifest, verbose).context("Failed to run script specified in manifest")?;
let resource = if let Some(file) = self.file {
fs::read(file).context("Failed to read specified file")?
} else if let Some(resource) = &manifest.publish.resource {
read_resource(resource, verbose)
.context("Failed to read resource specified in manifest")?
} else {
bail!("No resource to publish specified");
};
// Only prompt when the value was not passed as a flag; `unwrap_or`
// would evaluate (and thus show) the prompt eagerly.
let user = match self.user {
Some(user) => user,
None => Input::new()
.with_prompt("BeatMods1 username")
.interact_on(&*TERM_ERR)?,
};
let password = match self.password {
Some(password) => password,
None => PasswordInput::new()
.with_prompt("BeatMods1 password")
.interact_on(&*TERM_ERR)?,
};
publish_bm1(manifest, resource, self.category, user, password)?;
Ok(())
}
}
/// Reads and parses the `manifest.json` file
fn read_manifest() -> Result<Manifest> {
let p = ProgressBar::new_spinner();
p.set_message("Reading manifest");
p.enable_steady_tick(100);
let manifest_path = PathBuf::from("manifest.json");
if !manifest_path.exists() {
bail!("Can't find manifest file, make sure you are running from the same directory.");
}
let manifest_file = File::open(manifest_path).context("Failed to read manifest file")?;
let result = Manifest::from_reader(&manifest_file).context("Invalid manifest file")?;
p.finish();
Ok(result)
}
/// Runs the publish script commands from the manifest
fn run_commands(manifest: &Manifest, verbose: bool) -> Result<()> {
let p = ProgressBar::new_spinner();
p.set_message("Running commands");
p.enable_steady_tick(100);
let script = &manifest.publish.script;
if script.is_empty() {
p.finish_with_message("No commands to run");
return Ok(());
};
for command in script {
TERM_ERR.write_line(&(format!("$ {}", &command)))?;
let o = utils::shell_exec(&command, verbose).context("Failed to run command")?;
if !o.success() {
bail!("Command did not exit successfully");
}
}
p.finish();
Ok(())
}
/// Obtains a byte buffer containing the resource to upload to BeatMods2
fn read_resource(resource_path: &PathBuf, verbose: bool) -> Result<Vec<u8>> {
let p = ProgressBar::new_spinner();
p.set_message("Getting resource ready");
p.enable_steady_tick(100);
if !resource_path.exists() {
bail!("Can't find specified resource");
}
let result = if resource_path.is_dir() {
p.set_message("Resource is a directory, zipping");
let buffer = Cursor::new(Vec::new());
utils::zip_dir(resource_path, buffer, verbose)
.context("Failed to zip directory")?
.into_inner()
} else {
fs::read(resource_path)?
};
p.finish();
Ok(result)
}
/// Publishes the mod to BeatMods1 (legacy)
fn publish_bm1(
manifest: Manifest,
resource: Vec<u8>,
category: String,
user: String,
password: String,
) -> Result<()> {
let p = ProgressBar::new_spinner();
p.set_message("Publishing to BeatMods1");
p.enable_steady_tick(100);
let version_string = manifest.version.to_string();
let link_string = if let Some(l) = manifest.links.project_home {
l.into_string()
} else if let Some(l) = manifest.links.project_source {
l.into_string()
} else {
"https://beatmods.com".to_owned()
};
let description_string = manifest.description.join("\n");
if !BM1_CATEGORIES.iter().any(|c| c == &category) {
bail!("Invalid category");
}
let mut resource_name = manifest.id.clone();
resource_name.push('.');
resource_name.push_str(&version_string);
resource_name.push_str(".zip");
let file = Part::bytes(resource)
.file_name(resource_name)
.mime_str("application/zip")?;
let mut form = Form::new()
.part("file", file)
.text("name", manifest.name)
.text("version", manifest.version.to_string())
.text("gameVersion", manifest.game_version)
.text("link", link_string)
.text("description", description_string)
.text("category", category);
if let Some(d) = &manifest.depends_on {
let dependencies_string = d
.iter()
.map(|d| {
let mut s = d.0.clone();
s.push('@');
s.push_str(&d.1.minimum().to_string());
s
})
.collect::<Vec<String>>()
.join(",");
form = form.text("dependencies", dependencies_string);
}
let client = ClientBuilder::new().cookie_store(true).build()?;
let login_form = [("username", user), ("password", password)];
let login_response = client
.post("https://beatmods.com/api/v1/signIn")
.form(&login_form)
.send()?;
let token = login_response
.headers()
.get("x-access-token")
.context("Invalid credentials")?
.to_str()?;
let response = client
.post("https://beatmods.com/api/v1/mod/create/")
.multipart(form)
.bearer_auth(token)
.send()?;
if response.status() != StatusCode::OK {
bail!("Publishing failed: {}", response.text()?);
}
p.finish();
Ok(())
}
|
Publish
|
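The dependency field uploaded to BeatMods1 is a comma-separated list of `id@minimumVersion` pairs, exactly as built in `publish_bm1` above. A standalone sketch with hypothetical mod names:

```rust
fn main() {
    // Hypothetical (id, minimum version) pairs standing in for manifest.depends_on.
    let deps = [("SongCore", "3.0.0"), ("BSIPA", "4.1.3")];
    let dependencies_string = deps
        .iter()
        .map(|(id, min)| format!("{}@{}", id, min))
        .collect::<Vec<_>>()
        .join(",");
    assert_eq!(dependencies_string, "[email protected],[email protected]");
}
```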
base_response_options.js
|
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") return Reflect.decorate(decorators, target, key, desc);
switch (arguments.length) {
case 2: return decorators.reduceRight(function(o, d) { return (d && d(o)) || o; }, target);
case 3: return decorators.reduceRight(function(o, d) { return (d && d(target, key)), void 0; }, void 0);
case 4: return decorators.reduceRight(function(o, d) { return (d && d(target, key, o)) || o; }, desc);
}
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
import { Injectable } from 'angular2/angular2';
import { isPresent } from 'angular2/src/core/facade/lang';
import { Headers } from './headers';
import { ResponseTypes } from './enums';
/**
* Creates a response options object to be optionally provided when instantiating a
* {@link Response}.
*
* This class is based on the `ResponseInit` description in the [Fetch
* Spec](https://fetch.spec.whatwg.org/#responseinit).
*
* All values are null by default. Typical defaults can be found in the
* {@link BaseResponseOptions} class, which sub-classes `ResponseOptions`.
*
* This class may be used in tests to build {@link Response Responses} for
* mock responses (see {@link MockBackend}).
*
* ### Example ([live demo](http://plnkr.co/edit/P9Jkk8e8cz6NVzbcxEsD?p=preview))
*
* ```typescript
* import {ResponseOptions, Response} from 'angular2/http';
*
* var options = new ResponseOptions({
* body: '{"name":"Jeff"}'
* });
* var res = new Response(options);
*
* console.log('res.json():', res.json()); // Object {name: "Jeff"}
* ```
*/
export class ResponseOptions {
constructor({ body, status, headers, statusText, type, url } = {}) {
this.body = isPresent(body) ? body : null;
this.status = isPresent(status) ? status : null;
this.headers = isPresent(headers) ? headers : null;
this.statusText = isPresent(statusText) ? statusText : null;
this.type = isPresent(type) ? type : null;
this.url = isPresent(url) ? url : null;
}
/**
* Creates a copy of the `ResponseOptions` instance, using the optional input as values to
* override
* existing values. This method will not change the values of the instance on which it is being
* called.
*
* This may be useful when sharing a base `ResponseOptions` object inside tests,
* where certain properties may change from test to test.
*
* Example ([live demo](http://plnkr.co/edit/1lXquqFfgduTFBWjNoRE?p=preview))
*
* ```typescript
* import {ResponseOptions, Response} from 'angular2/http';
*
* var options = new ResponseOptions({
* body: {name: 'Jeff'}
* });
* var res = new Response(options.merge({
* url: 'https://google.com'
* }));
* console.log('options.url:', options.url); // null
* console.log('res.json():', res.json()); // Object {name: "Jeff"}
* console.log('res.url:', res.url); // https://google.com
* ```
*/
merge(options) {
|
status: isPresent(options) && isPresent(options.status) ? options.status : this.status,
headers: isPresent(options) && isPresent(options.headers) ? options.headers : this.headers,
statusText: isPresent(options) && isPresent(options.statusText) ? options.statusText :
this.statusText,
type: isPresent(options) && isPresent(options.type) ? options.type : this.type,
url: isPresent(options) && isPresent(options.url) ? options.url : this.url,
});
}
}
/**
* Subclass of {@link ResponseOptions}, with default values.
*
* Default values:
* * status: 200
* * headers: empty {@link Headers} object
*
* This class could be extended and bound to the {@link ResponseOptions} class
* when configuring an {@link Injector}, in order to override the default options
* used by {@link Http} to create {@link Response Responses}.
*
* ### Example ([live demo](http://plnkr.co/edit/qv8DLT?p=preview))
*
* ```typescript
* import {provide, bootstrap} from 'angular2/angular2';
* import {HTTP_PROVIDERS, Headers, Http, BaseResponseOptions, ResponseOptions} from
* 'angular2/http';
* import {App} from './myapp';
*
* class MyOptions extends BaseResponseOptions {
* headers:Headers = new Headers({network: 'github'});
* }
*
* bootstrap(App, [HTTP_PROVIDERS, provide(ResponseOptions, {useClass: MyOptions})]);
* ```
*
* The options could also be extended when manually creating a {@link Response}
* object.
*
* ### Example ([live demo](http://plnkr.co/edit/VngosOWiaExEtbstDoix?p=preview))
*
* ```
* import {BaseResponseOptions, Response} from 'angular2/http';
*
* var options = new BaseResponseOptions();
* var res = new Response(options.merge({
* body: 'Angular2',
* headers: new Headers({framework: 'angular'})
* }));
* console.log('res.headers.get("framework"):', res.headers.get('framework')); // angular
* console.log('res.text():', res.text()); // Angular2;
* ```
*/
export let BaseResponseOptions = class extends ResponseOptions {
constructor() {
super({ status: 200, statusText: 'Ok', type: ResponseTypes.Default, headers: new Headers() });
}
};
BaseResponseOptions = __decorate([
Injectable(),
__metadata('design:paramtypes', [])
], BaseResponseOptions);
//# sourceMappingURL=base_response_options.js.map
|
return new ResponseOptions({
body: isPresent(options) && isPresent(options.body) ? options.body : this.body,
|
0_7_2d_perlin_noise.rs
|
extern crate nannou;
extern crate rand;
use nannou::noise::{NoiseFn, Perlin};
use nannou::prelude::*;
fn main() {
nannou::app(model).run();
}
struct Model {
pixels: Vec<(i32, i32, u8)>,
}
impl Model {
fn new(pixels: Vec<(i32, i32, u8)>) -> Model {
Model { pixels }
}
}
fn
|
(app: &App) -> Model {
app.new_window().size(640, 360).view(view).build().unwrap();
let window = app.window_rect();
let (width, height) = window.w_h();
let Vector2 {
x: start_x,
y: start_y,
} = window.bottom_left();
let perlin = Perlin::new();
let mut pixels = Vec::new();
let mut xoff = 0.0;
for x in (start_x as i32)..(width as i32) {
let mut yoff = 0.0;
for y in (start_y as i32)..(height as i32) {
let color = map_range(perlin.get([xoff, yoff]), 0.0, 1.0, 0.0, 255.0);
pixels.push((x, y, color as u8));
yoff += 0.01;
}
xoff += 0.01;
}
Model::new(pixels)
}
fn view(app: &App, model: &Model, frame: Frame) {
let draw = app.draw();
draw.background().color(WHITE);
// Copy the small tuple fields out of the shared reference.
for &(x, y, color) in &model.pixels {
draw
.rect()
.x_y(x as f32, y as f32)
.w_h(1.0, 1.0)
.rgb8(color, color, color);
}
draw.to_frame(app, &frame).unwrap();
}
|
model
|
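A minimal sketch of the sampling idea in isolation, assuming the same `nannou` re-exports used above (`Perlin`, `NoiseFn`, `map_range`); the small 0.01 step between samples keeps neighbouring values close, which is what makes the rendered noise look smooth:

```rust
use nannou::noise::{NoiseFn, Perlin};
use nannou::prelude::map_range;

fn main() {
    let perlin = Perlin::new();
    let mut xoff = 0.0;
    for _ in 0..3 {
        // Nearby offsets produce gradually varying noise values.
        let v = perlin.get([xoff, 0.0]);
        let gray = map_range(v, 0.0, 1.0, 0.0, 255.0) as u8;
        println!("noise = {:.3}, gray = {}", v, gray);
        xoff += 0.01;
    }
}
```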
struct.rs
|
// A type `Borrowed` which houses a reference to an
// `i32`. The reference to `i32` must outlive `Borrowed`.
#[derive(Debug)]
struct Borrowed<'a>(&'a i32);
// Similarly, both references here must outlive this structure.
#[derive(Debug)]
struct NamedBorrowed<'a> {
x: &'a i32,
y: &'a i32,
}
// An enum which is either an `i32` or a reference to one.
#[derive(Debug)]
enum Either<'a> {
Num(i32),
Ref(&'a i32),
}
fn main()
|
{
let x = 18;
let y = 15;
let single = Borrowed(&x);
let double = NamedBorrowed { x: &x, y: &y };
let reference = Either::Ref(&x);
let number = Either::Num(y);
println!("x is borrowed in {:?}", single);
println!("x and y are borrowed in {:?}", double);
println!("x is borrowed in {:?}", reference);
println!("y is *not* borrowed in {:?}", number);
}
|
|
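A complementary sketch (not part of the original example): a function can hand out a `Borrowed` tied to its argument's lifetime, and the borrow checker guarantees the `i32` outlives the wrapper:

```rust
#[derive(Debug)]
struct Borrowed<'a>(&'a i32);

// The elided lifetime ties the returned wrapper to `value`,
// so the wrapper cannot outlive the integer it borrows.
fn wrap(value: &i32) -> Borrowed<'_> {
    Borrowed(value)
}

fn main() {
    let x = 18;
    let single = wrap(&x);
    println!("x is borrowed in {:?}", single); // x is borrowed in Borrowed(18)
}
```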
niatelemetry_bootflash_details_all_of.py
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NiatelemetryBootflashDetailsAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
},
('object_type',): {
'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'fw_rev': (str,), # noqa: E501
'model_type': (str,), # noqa: E501
'serial': (str,), # noqa: E501
}
@cached_property
def
|
():
return None
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'fw_rev': 'FwRev', # noqa: E501
'model_type': 'ModelType', # noqa: E501
'serial': 'Serial', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NiatelemetryBootflashDetailsAllOf - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "niatelemetry.BootflashDetails", must be one of ["niatelemetry.BootflashDetails", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "niatelemetry.BootflashDetails", must be one of ["niatelemetry.BootflashDetails", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fw_rev (str): Return firmware revision in boot flash details.. [optional] # noqa: E501
model_type (str): Return model type in boot flash details.. [optional] # noqa: E501
serial (str): Return serial id in boot flash details.. [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "niatelemetry.BootflashDetails")
object_type = kwargs.get('object_type', "niatelemetry.BootflashDetails")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_id = class_id
self.object_type = object_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
discriminator
|
serializers.py
|
from rest_framework import serializers
from product.models import Product
class ProductSerializer(serializers.ModelSerializer):
class Meta:
|
class ProductListSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ('store',)
|
model = Product
fields = '__all__'
|
base_lithium_ion_model.py
|
#
# Lithium-ion base model class
#
import pybamm
class BaseModel(pybamm.BaseBatteryModel):
"""
Overwrites default parameters from Base Model with default parameters for
lithium-ion models
**Extends:** :class:`pybamm.BaseBatteryModel`
"""
def __init__(self, options=None, name="Unnamed lithium-ion model", build=False):
super().__init__(options, name)
self.param = pybamm.LithiumIonParameters(options)
# Default timescale is discharge timescale
self.timescale = self.param.tau_discharge
# Set default length scales
self.length_scales = {
"negative electrode": self.param.L_x,
"separator": self.param.L_x,
"positive electrode": self.param.L_x,
"negative particle": self.param.R_n_typ,
"positive particle": self.param.R_p_typ,
"current collector y": self.param.L_y,
"current collector z": self.param.L_z,
}
self.set_standard_output_variables()
def set_standard_output_variables(self):
super().set_standard_output_variables()
# Particle concentration position
var = pybamm.standard_spatial_vars
self.variables.update(
{
"r_n": var.r_n,
"r_n [m]": var.r_n * self.param.R_n_typ,
"r_p": var.r_p,
"r_p [m]": var.r_p * self.param.R_p_typ,
}
)
def set_sei_submodel(self):
# negative electrode SEI
if self.options["sei"] == "none":
self.submodels["negative sei"] = pybamm.sei.NoSEI(self.param, "Negative")
if self.options["sei"] == "constant":
self.submodels["negative sei"] = pybamm.sei.ConstantSEI(
self.param, "Negative"
)
elif self.options["sei"] == "reaction limited":
self.submodels["negative sei"] = pybamm.sei.ReactionLimited(
self.param, "Negative"
)
elif self.options["sei"] == "solvent-diffusion limited":
self.submodels["negative sei"] = pybamm.sei.SolventDiffusionLimited(
self.param, "Negative"
)
elif self.options["sei"] == "electron-migration limited":
self.submodels["negative sei"] = pybamm.sei.ElectronMigrationLimited(
self.param, "Negative"
)
elif self.options["sei"] == "interstitial-diffusion limited":
self.submodels["negative sei"] = pybamm.sei.InterstitialDiffusionLimited(
self.param, "Negative"
)
elif self.options["sei"] == "ec reaction limited":
self.submodels["negative sei"] = pybamm.sei.EcReactionLimited(
self.param, "Negative"
)
# positive electrode
self.submodels["positive sei"] = pybamm.sei.NoSEI(self.param, "Positive")
def set_other_reaction_submodels_to_zero(self):
|
def set_crack_submodel(self):
if self.options["particle cracking"] == "none":
return
if self.options["particle cracking"] == "no cracking":
n = pybamm.particle_cracking.NoCracking(self.param, "Negative")
p = pybamm.particle_cracking.NoCracking(self.param, "Positive")
elif self.options["particle cracking"] == "cathode":
n = pybamm.particle_cracking.NoCracking(self.param, "Negative")
p = pybamm.particle_cracking.CrackPropagation(self.param, "Positive")
elif self.options["particle cracking"] == "anode":
n = pybamm.particle_cracking.CrackPropagation(self.param, "Negative")
p = pybamm.particle_cracking.NoCracking(self.param, "Positive")
else:
n = pybamm.particle_cracking.CrackPropagation(self.param, "Negative")
p = pybamm.particle_cracking.CrackPropagation(self.param, "Positive")
self.submodels["negative particle cracking"] = n
self.submodels["positive particle cracking"] = p
|
self.submodels["negative oxygen interface"] = pybamm.interface.NoReaction(
self.param, "Negative", "lithium-ion oxygen"
)
self.submodels["positive oxygen interface"] = pybamm.interface.NoReaction(
self.param, "Positive", "lithium-ion oxygen"
)
|
shipment_summary.go
|
package paperwork
import (
"errors"
"time"
"github.com/transcom/mymove/pkg/appcontext"
"github.com/transcom/mymove/pkg/route"
"github.com/transcom/mymove/pkg/rateengine"
"github.com/transcom/mymove/pkg/unit"
"github.com/transcom/mymove/pkg/models"
)
type ppmComputer interface {
ComputePPMMoveCosts(appCtx appcontext.AppContext, weight unit.Pound, originPickupZip5 string, originDutyStationZip5 string, destinationZip5 string, distanceMilesFromOriginPickupZip int, distanceMilesFromOriginDutyStationZip int, date time.Time, daysInSit int) (cost rateengine.CostDetails, err error)
}
// SSWPPMComputer is a rate engine wrapper with helper functions to simplify PPM cost calculations specific to the shipment summary worksheet
type SSWPPMComputer struct {
ppmComputer
}
// NewSSWPPMComputer creates a SSWPPMComputer
func NewSSWPPMComputer(PPMComputer ppmComputer) *SSWPPMComputer
|
// ObligationType is the type corresponding to the obligation sections of the shipment summary worksheet
type ObligationType int
// ComputeObligations is a helper function for computing the obligations section of the shipment summary worksheet
func (sswPpmComputer *SSWPPMComputer) ComputeObligations(appCtx appcontext.AppContext, ssfd models.ShipmentSummaryFormData, planner route.Planner) (obligation models.Obligations, err error) {
firstPPM, err := sswPpmComputer.nilCheckPPM(ssfd)
if err != nil {
return models.Obligations{}, err
}
originDutyStationZip := ssfd.CurrentDutyStation.Address.PostalCode
destDutyStationZip := ssfd.Order.NewDutyStation.Address.PostalCode
distanceMilesFromPickupZip, err := planner.Zip5TransitDistanceLineHaul(appCtx, *firstPPM.PickupPostalCode, destDutyStationZip)
if err != nil {
return models.Obligations{}, errors.New("error calculating distance")
}
distanceMilesFromDutyStationZip, err := planner.Zip5TransitDistanceLineHaul(appCtx, originDutyStationZip, destDutyStationZip)
if err != nil {
return models.Obligations{}, errors.New("error calculating distance")
}
actualCosts, err := sswPpmComputer.ComputePPMMoveCosts(
appCtx,
ssfd.PPMRemainingEntitlement,
*firstPPM.PickupPostalCode,
originDutyStationZip,
destDutyStationZip,
distanceMilesFromPickupZip,
distanceMilesFromDutyStationZip,
*firstPPM.OriginalMoveDate,
0,
)
if err != nil {
return models.Obligations{}, errors.New("error calculating PPM actual obligations")
}
maxCosts, err := sswPpmComputer.ComputePPMMoveCosts(
appCtx,
ssfd.WeightAllotment.TotalWeight,
*firstPPM.PickupPostalCode,
originDutyStationZip,
destDutyStationZip,
distanceMilesFromPickupZip,
distanceMilesFromDutyStationZip,
*firstPPM.OriginalMoveDate,
0,
)
if err != nil {
return models.Obligations{}, errors.New("error calculating PPM max obligations")
}
actualCost := rateengine.GetWinningCostMove(actualCosts)
maxCost := rateengine.GetWinningCostMove(maxCosts)
nonWinningActualCost := rateengine.GetNonWinningCostMove(actualCosts)
nonWinningMaxCost := rateengine.GetNonWinningCostMove(maxCosts)
var actualSIT unit.Cents
if firstPPM.TotalSITCost != nil {
actualSIT = *firstPPM.TotalSITCost
}
if actualSIT > maxCost.SITMax {
actualSIT = maxCost.SITMax
}
obligations := models.Obligations{
ActualObligation: models.Obligation{Gcc: actualCost.GCC, SIT: actualSIT, Miles: unit.Miles(actualCost.Mileage)},
MaxObligation: models.Obligation{Gcc: maxCost.GCC, SIT: actualSIT, Miles: unit.Miles(actualCost.Mileage)},
NonWinningActualObligation: models.Obligation{Gcc: nonWinningActualCost.GCC, SIT: actualSIT, Miles: unit.Miles(nonWinningActualCost.Mileage)},
NonWinningMaxObligation: models.Obligation{Gcc: nonWinningMaxCost.GCC, SIT: actualSIT, Miles: unit.Miles(nonWinningMaxCost.Mileage)},
}
return obligations, nil
}
func (sswPpmComputer *SSWPPMComputer) nilCheckPPM(ssfd models.ShipmentSummaryFormData) (models.PersonallyProcuredMove, error) {
if len(ssfd.PersonallyProcuredMoves) == 0 {
return models.PersonallyProcuredMove{}, errors.New("missing ppm")
}
firstPPM := ssfd.PersonallyProcuredMoves[0]
if firstPPM.PickupPostalCode == nil || firstPPM.DestinationPostalCode == nil {
return models.PersonallyProcuredMove{}, errors.New("missing required address parameter")
}
if firstPPM.OriginalMoveDate == nil {
return models.PersonallyProcuredMove{}, errors.New("missing required original move date parameter")
}
return firstPPM, nil
}
|
{
return &SSWPPMComputer{ppmComputer: PPMComputer}
}
|
parallelwave-mpi.py
|
#!/usr/bin/env python
"""
A simple Python program that solves a 2D wave equation in parallel.
Domain partitioning and inter-processor communication
are done by an object of class MPIRectPartitioner2D
(which is a subclass of RectPartitioner2D and uses MPI via mpi4py)
An example of running the program is (8 processors, 4x2 partition,
400x100 grid cells)::
$ ipclusterz start --profile mpi -n 8 # start 8 engines (assuming mpi profile has been configured)
$ ./parallelwave-mpi.py --grid 400 100 --partition 4 2 --profile mpi
See also parallelwave.py, which runs the same program, but does not use MPI
(via mpi4py) for the inter-engine communication.
Authors
-------
* Xing Cai
* Min Ragan-Kelley
"""
import sys
import time
from numpy import exp, zeros, newaxis, sqrt
from IPython.external import argparse
from IPython.parallel import Client, Reference
def setup_partitioner(index, num_procs, gnum_cells, parts):
"""create a partitioner in the engine namespace"""
global partitioner
p = MPIRectPartitioner2D(my_id=index, num_procs=num_procs)
p.redim(global_num_cells=gnum_cells, num_parts=parts)
p.prepare_communication()
# put the partitioner into the global namespace:
partitioner=p
def setup_solver(*args, **kwargs):
"""create a WaveSolver in the engine namespace"""
global solver
solver = WaveSolver(*args, **kwargs)
def wave_saver(u, x, y, t):
"""save the wave log"""
global u_hist
global t_hist
t_hist.append(t)
u_hist.append(1.0*u)
# main program:
if __name__ == '__main__':
parser = argparse.ArgumentParser()
paa = parser.add_argument
paa('--grid', '-g',
type=int, nargs=2, default=[100,100], dest='grid',
help="Cells in the grid, e.g. --grid 100 200")
paa('--partition', '-p',
type=int, nargs=2, default=None,
help="Process partition grid, e.g. --partition 4 2 for 4x2")
paa('-c',
type=float, default=1.,
help="Wave speed (I think)")
paa('-Ly',
type=float, default=1.,
help="system size (in y)")
paa('-Lx',
type=float, default=1.,
help="system size (in x)")
paa('-t', '--tstop',
type=float, default=1.,
help="Time units to run")
paa('--profile',
type=unicode, default=u'default',
help="Specify the ipcluster profile for the client to connect to.")
paa('--save',
action='store_true',
help="Add this flag to save the time/wave history during the run.")
paa('--scalar',
action='store_true',
help="Also run with scalar interior implementation, to see vector speedup.")
ns = parser.parse_args()
# set up arguments
grid = ns.grid
partition = ns.partition
Lx = ns.Lx
Ly = ns.Ly
c = ns.c
tstop = ns.tstop
if ns.save:
user_action = wave_saver
else:
user_action = None
num_cells = 1.0*(grid[0]-1)*(grid[1]-1)
final_test = True
# create the Client
rc = Client(profile=ns.profile)
num_procs = len(rc.ids)
if partition is None:
partition = [1,num_procs]
assert partition[0]*partition[1] == num_procs, "can't map partition %s to %i engines"%(partition, num_procs)
view = rc[:]
print "Running %s system on %s processes until %f"%(grid, partition, tstop)
# functions defining initial/boundary/source conditions
def I(x,y):
from numpy import exp
return 1.5*exp(-100*((x-0.5)**2+(y-0.5)**2))
def f(x,y,t):
return 0.0
# from numpy import exp,sin
# return 10*exp(-(x - sin(100*t))**2)
def bc(x,y,t):
return 0.0
# initial imports, setup rank
view.execute('\n'.join([
"from mpi4py import MPI",
"import numpy",
"mpi = MPI.COMM_WORLD",
"my_id = MPI.COMM_WORLD.Get_rank()"]), block=True)
# initialize t_hist/u_hist for saving the state at each step (optional)
view['t_hist'] = []
view['u_hist'] = []
# set vector/scalar implementation details
impl = {}
impl['ic'] = 'vectorized'
impl['inner'] = 'scalar'
impl['bc'] = 'vectorized'
# execute some files so that the classes we need will be defined on the engines:
view.run('RectPartitioner.py')
view.run('wavesolver.py')
# setup remote partitioner
# note that Reference means that the argument passed to setup_partitioner will be the
# object named 'my_id' in the engine's namespace
view.apply_sync(setup_partitioner, Reference('my_id'), num_procs, grid, partition)
# wait for initial communication to complete
view.execute('mpi.barrier()')
# setup remote solvers
view.apply_sync(setup_solver, I,f,c,bc,Lx,Ly,partitioner=Reference('partitioner'), dt=0,implementation=impl)
# lambda for calling solver.solve:
_solve = lambda *args, **kwargs: solver.solve(*args, **kwargs)
if ns.scalar:
impl['inner'] = 'scalar'
# run first with element-wise Python operations for each cell
t0 = time.time()
ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test, user_action=user_action)
|
# the L2 norm (RMS) of the result:
norm = sqrt(s/num_cells)
else:
norm = -1
t1 = time.time()
print 'scalar inner-version, Wtime=%g, norm=%g'%(t1-t0, norm)
impl['inner'] = 'vectorized'
# setup new solvers
view.apply_sync(setup_solver, I,f,c,bc,Lx,Ly,partitioner=Reference('partitioner'), dt=0,implementation=impl)
view.execute('mpi.barrier()')
# run again with numpy vectorized inner-implementation
t0 = time.time()
ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test)#, user_action=wave_saver)
if final_test:
# this sum is performed element-wise as results finish
s = sum(ar)
# the L2 norm (RMS) of the result:
norm = sqrt(s/num_cells)
else:
norm = -1
t1 = time.time()
print 'vector inner-version, Wtime=%g, norm=%g'%(t1-t0, norm)
# if ns.save is True, then u_hist stores the history of u as a list
# If the partition scheme is Nx1, then u can be reconstructed via 'gather':
if ns.save and partition[-1] == 1:
import pylab
view.execute('u_last=u_hist[-1]')
# map mpi IDs to IPython IDs, which may not match
ranks = view['my_id']
targets = range(len(ranks))
for idx in range(len(ranks)):
targets[idx] = ranks.index(idx)
u_last = rc[targets].gather('u_last', block=True)
pylab.pcolor(u_last)
pylab.show()
|
if final_test:
# this sum is performed element-wise as results finish
s = sum(ar)
|
gsl.py
|
import ctypes
from ctypes import (POINTER, c_char_p, c_size_t, c_int, c_long, c_ulong,
c_double, c_void_p)
from ctypes.util import find_library
class _c_gsl_rng_type(ctypes.Structure):
_fields_ = [('name', c_char_p),
('max', c_long),
('min', c_size_t),
('__set', c_void_p),
('__get', c_void_p),
('__get_double', c_void_p),
]
_c_gsl_rng_type_p = POINTER(_c_gsl_rng_type)
class _c_gsl_rng(ctypes.Structure):
_fields_ = [('type', _c_gsl_rng_type_p),
('state', c_void_p)]
_c_gsl_rng_p = POINTER(_c_gsl_rng)
class _GSLFuncLoader(object):
# see: http://code.activestate.com/recipes/576549-gsl-with-python3/
gslcblas = ctypes.CDLL(find_library('gslcblas'), mode=ctypes.RTLD_GLOBAL)
gsl = ctypes.CDLL(find_library('gsl'))
def _load_1(self, name, argtypes=None, restype=None):
func = getattr(self.gsl, name)
if argtypes is not None:
func.argtypes = argtypes
if restype is not None:
func.restype = restype
setattr(self, name, func)
return func
def _load(self, name, argtypes=None, restype=None):
if isinstance(name, str):
return self._load_1(name, argtypes, restype)
else:
try:
return [self._load_1(n, argtypes, restype) for n in name]
except TypeError:
raise ValueError('name=%r should be a string or an iterable '
'of strings' % name)
func = _GSLFuncLoader()
func._load('gsl_strerror', [c_int], c_char_p)
func._load('gsl_rng_alloc', [_c_gsl_rng_type_p], _c_gsl_rng_p)
func._load('gsl_rng_set', [_c_gsl_rng_p, c_ulong])
func._load('gsl_rng_free', [_c_gsl_rng_p])
func._load('gsl_rng_types_setup',
restype=c_void_p) # POINTER(_c_gsl_rng_p)
func._load('gsl_rng_state', [_c_gsl_rng_p], c_void_p)
func._load('gsl_rng_size', [_c_gsl_rng_p], c_size_t)
func._load(['gsl_ran_gaussian',
'gsl_ran_gaussian_ziggurat',
'gsl_ran_gaussian_ratio_method'],
[_c_gsl_rng_p, c_double],
c_double)
gsl_strerror = func.gsl_strerror
def _get_gsl_rng_type_p_dict():
"""
Get all ``gsl_rng_type`` objects as a dict holding a pointer to each one.
This is equivalent to the C code below, which is from the GSL documentation:
.. sourcecode:: c
const gsl_rng_type **t, **t0;
t0 = gsl_rng_types_setup ();
for (t = t0; *t != 0; t++)
{
printf ("%s\n", (*t)->name); /* instead, store t to dict */
}
"""
t = func.gsl_rng_types_setup()
dt = ctypes.sizeof(c_void_p)
dct = {}
while True:
a = c_void_p.from_address(t)
if a.value is None:
break
name = c_char_p.from_address(a.value).value
name = name.decode() # for Python 3 (bytes to str)
dct[name] = ctypes.cast(a, _c_gsl_rng_type_p)
t += dt
return dct
class gsl_rng(object):
_gsl_rng_alloc = func.gsl_rng_alloc
_gsl_rng_set = func.gsl_rng_set
_gsl_rng_free = func.gsl_rng_free
_gsl_rng_type_p_dict = _get_gsl_rng_type_p_dict()
_ctype_ = _c_gsl_rng_p # for railgun
def __init__(self, seed=None, name='mt19937'):
self._gsl_rng_name = name
self._gsl_rng_type_p = self._gsl_rng_type_p_dict[name]
self._cdata_ = self._gsl_rng_alloc(self._gsl_rng_type_p)
# the name '_cdata_' is for railgun
if seed is not None:
self.set(seed)
def __setstate__(self, data):
(attrs, state) = data
self.__init__(name=attrs.pop('_gsl_rng_name'))
self.__dict__.update(attrs)
self.set_state(state)
def __getstate__(self):
attrs = self.__dict__.copy()
del attrs['_gsl_rng_type_p']
del attrs['_cdata_']
return (attrs, self.get_state())
def __copy__(self):
|
def __del__(self):
self._gsl_rng_free(self._cdata_)
def set(self, seed):
self._gsl_rng_set(self._cdata_, seed)
_gsl_ran_gaussian = {
'': func.gsl_ran_gaussian,
'ziggurat': func.gsl_ran_gaussian_ziggurat,
'ratio_method': func.gsl_ran_gaussian_ratio_method,
}
def ran_gaussian(self, sigma=1.0, method=''):
return self._gsl_ran_gaussian[method](self._cdata_, sigma)
def get_state(self):
"""
Return state of the random number generator as a byte string.
"""
ptr = func.gsl_rng_state(self._cdata_)
size = func.gsl_rng_size(self._cdata_)
buf = ctypes.create_string_buffer(size)
ctypes.memmove(buf, ptr, size)
return buf.raw
def set_state(self, state):
"""
Set state returned by :meth:`get_state`.
"""
ptr = func.gsl_rng_state(self._cdata_)
size = func.gsl_rng_size(self._cdata_)
given_size = len(state)
# Pass size explicitly, otherwise it will create a buffer with
# extra NULL terminator in it:
buf = ctypes.create_string_buffer(state, given_size)
if given_size != size:
raise ValueError(
'Trying to set incompatible length of state. '
'Size of the given state is {0} while {1} is required. '
.format(given_size, size))
ctypes.memmove(ptr, buf, size)
def plot_gaussian(method='', sigma=1, show=True):
import pylab
rng = gsl_rng()
pylab.hist(
[rng.ran_gaussian(method=method, sigma=sigma) for i in range(10000)],
bins=100, normed=True)
if show:
pylab.show()
def print_error_codes():
for i in range(1000):
# gsl_strerror returns bytes under Python 3; decode before comparing
error_message = gsl_strerror(i).decode()
if error_message != 'unknown error code':
print('% 4d: "%s"' % (i, error_message))
def main():
import sys
cmd2func = dict(
print_error_codes=print_error_codes,
plot_gaussian=plot_gaussian,
)
if len(sys.argv) <= 1:  # sys.argv[0] is always the script name
print('Please specify command or code to execute\n')
for name in sorted(cmd2func):
print(name)
else:
(cmd,) = sys.argv[1:]
if cmd in cmd2func:
print("Calling function: %s" % cmd)
cmd2func[cmd]()
else:
print("Executing code: %s" % cmd)
ret = eval(cmd, globals())
if ret is not None:
print("Returned %r" % ret)
if __name__ == '__main__':
main()
|
clone = self.__class__.__new__(self.__class__)
clone.__dict__.update(self.__dict__)
return clone
|
defer.js
|
import { combine } from './'
const defer = stream => {
let ignore = stream.initialized
return combine
(([ stream ], self) => {
if (ignore) {
ignore = false
|
self.set(stream.get())
})
([ stream ])
}
export default defer
|
return
}
|
basename.rs
|
// This file is part of the uutils coreutils package.
//
// (c) Jimmy Lu <[email protected]>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) fullname
#[macro_use]
extern crate uucore;
use clap::{crate_version, App, Arg};
use std::path::{is_separator, PathBuf};
use uucore::InvalidEncodingHandling;
static SUMMARY: &str = "Print NAME with any leading directory components removed
If specified, also remove a trailing SUFFIX";
fn usage() -> String {
format!(
"{0} NAME [SUFFIX]
{0} OPTION... NAME...",
uucore::execution_phrase()
)
}
pub mod options {
pub static MULTIPLE: &str = "multiple";
pub static NAME: &str = "name";
pub static SUFFIX: &str = "suffix";
pub static ZERO: &str = "zero";
}
pub fn uumain(args: impl uucore::Args) -> i32 {
let args = args
.collect_str(InvalidEncodingHandling::ConvertLossy)
.accept_any();
let usage = usage();
//
// Argument parsing
//
let matches = uu_app().usage(&usage[..]).get_matches_from(args);
// too few arguments
if !matches.is_present(options::NAME) {
crash!(
1,
"{1}\nTry '{0} --help' for more information.",
uucore::execution_phrase(),
"missing operand"
);
}
let opt_suffix = matches.is_present(options::SUFFIX);
let opt_multiple = matches.is_present(options::MULTIPLE);
let opt_zero = matches.is_present(options::ZERO);
let multiple_paths = opt_suffix || opt_multiple;
// too many arguments
if !multiple_paths && matches.occurrences_of(options::NAME) > 2 {
crash!(
1,
"extra operand '{1}'\nTry '{0} --help' for more information.",
uucore::execution_phrase(),
matches.values_of(options::NAME).unwrap().nth(2).unwrap()
);
}
let suffix = if opt_suffix {
matches.value_of(options::SUFFIX).unwrap()
} else if !opt_multiple && matches.occurrences_of(options::NAME) > 1 {
matches.values_of(options::NAME).unwrap().nth(1).unwrap()
} else {
""
};
//
// Main Program Processing
//
let paths: Vec<_> = if multiple_paths {
matches.values_of(options::NAME).unwrap().collect()
} else {
matches.values_of(options::NAME).unwrap().take(1).collect()
};
let line_ending = if opt_zero { "\0" } else { "\n" };
for path in paths {
print!("{}{}", basename(path, suffix), line_ending);
}
0
}
pub fn uu_app() -> App<'static, 'static> {
App::new(uucore::util_name())
.version(crate_version!())
.about(SUMMARY)
.arg(
Arg::with_name(options::MULTIPLE)
.short("a")
.long(options::MULTIPLE)
.help("support multiple arguments and treat each as a NAME"),
)
.arg(Arg::with_name(options::NAME).multiple(true).hidden(true))
.arg(
Arg::with_name(options::SUFFIX)
.short("s")
.long(options::SUFFIX)
.value_name("SUFFIX")
.help("remove a trailing SUFFIX; implies -a"),
)
.arg(
Arg::with_name(options::ZERO)
.short("z")
.long(options::ZERO)
.help("end each output line with NUL, not newline"),
)
}
fn basename(fullname: &str, suffix: &str) -> String
|
{
// Remove all platform-specific path separators from the end.
let path = fullname.trim_end_matches(is_separator);
// If the path contained *only* suffix characters (for example, if
// `fullname` were "///" and `suffix` were "/"), then `path` would
// be left with the empty string. In that case, we set `path` to be
// the original `fullname` to avoid returning the empty path.
let path = if path.is_empty() { fullname } else { path };
// Convert to path buffer and get last path component
let pb = PathBuf::from(path);
match pb.components().last() {
Some(c) => {
let name = c.as_os_str().to_str().unwrap();
if name == suffix {
name.to_string()
} else {
name.strip_suffix(suffix).unwrap_or(name).to_string()
}
}
None => "".to_owned(),
}
}
|
|
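A small sketch of the trimming rules implemented by `basename` above (a hypothetical `main` calling the helper directly):

```rust
fn main() {
    // Trailing separators are trimmed before the last component is taken.
    assert_eq!(basename("/usr/lib/", ""), "lib");
    // A matching suffix is stripped from the last component...
    assert_eq!(basename("report.txt", ".txt"), "report");
    // ...unless the whole component equals the suffix.
    assert_eq!(basename(".txt", ".txt"), ".txt");
}
```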
it_VA.go
|
package it_VA
import (
"math"
"strconv"
"time"
"package/locales"
"package/locales/currency"
)
type it_VA struct {
locale string
pluralsCardinal []locales.PluralRule
pluralsOrdinal []locales.PluralRule
pluralsRange []locales.PluralRule
decimal string
group string
minus string
percent string
perMille string
timeSeparator string
infinity string
currencies []string // idx = enum of currency code
currencyPositiveSuffix string
currencyNegativeSuffix string
monthsAbbreviated []string
monthsNarrow []string
monthsWide []string
daysAbbreviated []string
daysNarrow []string
daysShort []string
daysWide []string
periodsAbbreviated []string
periodsNarrow []string
periodsShort []string
periodsWide []string
erasAbbreviated []string
erasNarrow []string
erasWide []string
timezones map[string]string
}
// New returns a new instance of translator for the 'it_VA' locale
func New() locales.Translator
|
) string {
return it.locale
}
// PluralsCardinal returns the list of cardinal plural rules associated with 'it_VA'
func (it *it_VA) PluralsCardinal() []locales.PluralRule {
return it.pluralsCardinal
}
// PluralsOrdinal returns the list of ordinal plural rules associated with 'it_VA'
func (it *it_VA) PluralsOrdinal() []locales.PluralRule {
return it.pluralsOrdinal
}
// PluralsRange returns the list of range plural rules associated with 'it_VA'
func (it *it_VA) PluralsRange() []locales.PluralRule {
return it.pluralsRange
}
// CardinalPluralRule returns the cardinal PluralRule given 'num' and digits/precision of 'v' for 'it_VA'
func (it *it_VA) CardinalPluralRule(num float64, v uint64) locales.PluralRule {
n := math.Abs(num)
i := int64(n)
if i == 1 && v == 0 {
return locales.PluralRuleOne
}
return locales.PluralRuleOther
}
// OrdinalPluralRule returns the ordinal PluralRule given 'num' and digits/precision of 'v' for 'it_VA'
func (it *it_VA) OrdinalPluralRule(num float64, v uint64) locales.PluralRule {
n := math.Abs(num)
if n == 11 || n == 8 || n == 80 || n == 800 {
return locales.PluralRuleMany
}
return locales.PluralRuleOther
}
// RangePluralRule returns the range PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for 'it_VA'
func (it *it_VA) RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) locales.PluralRule {
start := it.CardinalPluralRule(num1, v1)
end := it.CardinalPluralRule(num2, v2)
if start == locales.PluralRuleOne && end == locales.PluralRuleOther {
return locales.PluralRuleOther
} else if start == locales.PluralRuleOther && end == locales.PluralRuleOne {
return locales.PluralRuleOne
}
return locales.PluralRuleOther
}
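// Illustrative values for the plural-rule helpers above (comment-only
// sketch, not part of the generated file):
//
//	CardinalPluralRule(1, 0) // locales.PluralRuleOne ("1 giorno")
//	CardinalPluralRule(1, 1) // locales.PluralRuleOther ("1,0 giorni")
//	OrdinalPluralRule(8, 0)  // locales.PluralRuleMany (Italian ordinals for 8, 11, 80, 800)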
// MonthAbbreviated returns the locales abbreviated month given the 'month' provided
func (it *it_VA) MonthAbbreviated(month time.Month) string {
return it.monthsAbbreviated[month]
}
// MonthsAbbreviated returns the locales abbreviated months
func (it *it_VA) MonthsAbbreviated() []string {
return it.monthsAbbreviated[1:]
}
// MonthNarrow returns the locales narrow month given the 'month' provided
func (it *it_VA) MonthNarrow(month time.Month) string {
return it.monthsNarrow[month]
}
// MonthsNarrow returns the locales narrow months
func (it *it_VA) MonthsNarrow() []string {
return it.monthsNarrow[1:]
}
// MonthWide returns the locales wide month given the 'month' provided
func (it *it_VA) MonthWide(month time.Month) string {
return it.monthsWide[month]
}
// MonthsWide returns the locales wide months
func (it *it_VA) MonthsWide() []string {
return it.monthsWide[1:]
}
// WeekdayAbbreviated returns the locales abbreviated weekday given the 'weekday' provided
func (it *it_VA) WeekdayAbbreviated(weekday time.Weekday) string {
return it.daysAbbreviated[weekday]
}
// WeekdaysAbbreviated returns the locales abbreviated weekdays
func (it *it_VA) WeekdaysAbbreviated() []string {
return it.daysAbbreviated
}
// WeekdayNarrow returns the locales narrow weekday given the 'weekday' provided
func (it *it_VA) WeekdayNarrow(weekday time.Weekday) string {
return it.daysNarrow[weekday]
}
// WeekdaysNarrow returns the locales narrow weekdays
func (it *it_VA) WeekdaysNarrow() []string {
return it.daysNarrow
}
// WeekdayShort returns the locales short weekday given the 'weekday' provided
func (it *it_VA) WeekdayShort(weekday time.Weekday) string {
return it.daysShort[weekday]
}
// WeekdaysShort returns the locales short weekdays
func (it *it_VA) WeekdaysShort() []string {
return it.daysShort
}
// WeekdayWide returns the locales wide weekday given the 'weekday' provided
func (it *it_VA) WeekdayWide(weekday time.Weekday) string {
return it.daysWide[weekday]
}
// WeekdaysWide returns the locales wide weekdays
func (it *it_VA) WeekdaysWide() []string {
return it.daysWide
}
// Decimal returns the decimal point of number
func (it *it_VA) Decimal() string {
return it.decimal
}
// Group returns the group of number
func (it *it_VA) Group() string {
return it.group
}
// Minus returns the minus sign of number
func (it *it_VA) Minus() string {
return it.minus
}
// FmtNumber returns 'num' with digits/precision of 'v' for 'it_VA' and handles both Whole and Real numbers based on 'v'
func (it *it_VA) FmtNumber(num float64, v uint64) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
l := len(s) + 2 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, it.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, it.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, it.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
return string(b)
}
// FmtPercent returns 'num' with digits/precision of 'v' for 'it_VA' and handles both Whole and Real numbers based on 'v'
// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
func (it *it_VA) FmtPercent(num float64, v uint64) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
l := len(s) + 3
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, it.decimal[0])
continue
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, it.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
b = append(b, it.percent...)
return string(b)
}
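// Illustrative output of the number formatters above, given the it_VA
// separators set in New below (decimal ",", group "."); comment-only sketch:
//
//	FmtNumber(1234567.891, 3) // "1.234.567,891"
//	FmtPercent(12.5, 1)       // "12,5%"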
// FmtCurrency returns the currency representation of 'num' with digits/precision of 'v' for 'it_VA'
func (it *it_VA) FmtCurrency(num float64, v uint64, currency currency.Type) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
symbol := it.currencies[currency]
l := len(s) + len(symbol) + 4 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, it.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, it.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, it.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
if int(v) < 2 {
if v == 0 {
b = append(b, it.decimal...)
}
for i := 0; i < 2-int(v); i++ {
b = append(b, '0')
}
}
b = append(b, it.currencyPositiveSuffix...)
b = append(b, symbol...)
return string(b)
}
// FmtAccounting returns the currency representation of 'num' with digits/precision of 'v' for 'it_VA'
// in accounting notation.
func (it *it_VA) FmtAccounting(num float64, v uint64, currency currency.Type) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
symbol := it.currencies[currency]
l := len(s) + len(symbol) + 4 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, it.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, it.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, it.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
if int(v) < 2 {
if v == 0 {
b = append(b, it.decimal...)
}
for i := 0; i < 2-int(v); i++ {
b = append(b, '0')
}
}
if num < 0 {
b = append(b, it.currencyNegativeSuffix...)
b = append(b, symbol...)
} else {
b = append(b, it.currencyPositiveSuffix...)
b = append(b, symbol...)
}
return string(b)
}
// FmtDateShort returns the short date representation of 't' for 'it_VA'
func (it *it_VA) FmtDateShort(t time.Time) string {
b := make([]byte, 0, 32)
if t.Day() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x2f}...)
if t.Month() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Month()), 10)
b = append(b, []byte{0x2f}...)
if t.Year() > 9 {
b = append(b, strconv.Itoa(t.Year())[2:]...)
} else {
b = append(b, strconv.Itoa(t.Year())[1:]...)
}
return string(b)
}
// FmtDateMedium returns the medium date representation of 't' for 'it_VA'
func (it *it_VA) FmtDateMedium(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20}...)
b = append(b, it.monthsAbbreviated[t.Month()]...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtDateLong returns the long date representation of 't' for 'it_VA'
func (it *it_VA) FmtDateLong(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20}...)
b = append(b, it.monthsWide[t.Month()]...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtDateFull returns the full date representation of 't' for 'it_VA'
func (it *it_VA) FmtDateFull(t time.Time) string {
b := make([]byte, 0, 32)
b = append(b, it.daysWide[t.Weekday()]...)
b = append(b, []byte{0x20}...)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20}...)
b = append(b, it.monthsWide[t.Month()]...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
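// Illustrative output of the date formatters above for t = Friday 5 March
// 2021 (comment-only sketch):
//
//	FmtDateShort(t)  // "05/03/21"
//	FmtDateMedium(t) // "5 mar 2021"
//	FmtDateLong(t)   // "5 marzo 2021"
//	FmtDateFull(t)   // "venerdì 5 marzo 2021"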
// FmtTimeShort returns the short time representation of 't' for 'it_VA'
func (it *it_VA) FmtTimeShort(t time.Time) string {
b := make([]byte, 0, 32)
if t.Hour() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, it.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
return string(b)
}
// FmtTimeMedium returns the medium time representation of 't' for 'it_VA'
func (it *it_VA) FmtTimeMedium(t time.Time) string {
b := make([]byte, 0, 32)
if t.Hour() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, it.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, it.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
return string(b)
}
// FmtTimeLong returns the long time representation of 't' for 'it_VA'
func (it *it_VA) FmtTimeLong(t time.Time) string {
b := make([]byte, 0, 32)
if t.Hour() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, it.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, it.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20}...)
tz, _ := t.Zone()
b = append(b, tz...)
return string(b)
}
// FmtTimeFull returns the full time representation of 't' for 'it_VA'
func (it *it_VA) FmtTimeFull(t time.Time) string {
b := make([]byte, 0, 32)
if t.Hour() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, it.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, it.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20}...)
tz, _ := t.Zone()
if btz, ok := it.timezones[tz]; ok {
b = append(b, btz...)
} else {
b = append(b, tz...)
}
return string(b)
}
// New returns a new instance of translator for the 'it_VA' locale
func New() locales.Translator {
return &it_VA{
locale: "it_VA",
pluralsCardinal: []locales.PluralRule{2, 6},
pluralsOrdinal: []locales.PluralRule{5, 6},
pluralsRange: []locales.PluralRule{2, 6},
decimal: ",",
group: ".",
minus: "-",
percent: "%",
perMille: "‰",
timeSeparator: ":",
infinity: "∞",
currencies: []string{"ADP", "AED", "AFA", "AFN", "ALK", "ALL", "AMD", "ANG", "AOA", "AOK", "AON", "AOR", "ARA", "ARL", "ARM", "ARP", "ARS", "ATS", "AUD", "AWG", "AZM", "AZN", "BAD", "BAM", "BAN", "BBD", "BDT", "BEC", "BEF", "BEL", "BGL", "BGM", "BGN", "BGO", "BHD", "BIF", "BMD", "BND", "BOB", "BOL", "BOP", "BOV", "BRB", "BRC", "BRE", "BRL", "BRN", "BRR", "BRZ", "BSD", "BTN", "BUK", "BWP", "BYB", "BYN", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF", "CHW", "CLE", "CLF", "CLP", "CNH", "CNX", "CNY", "COP", "COU", "CRC", "CSD", "CSK", "CUC", "CUP", "CVE", "CYP", "CZK", "DDM", "DEM", "DJF", "DKK", "DOP", "DZD", "ECS", "ECV", "EEK", "EGP", "ERN", "ESA", "ESB", "ESP", "ETB", "EUR", "FIM", "FJD", "FKP", "FRF", "GBP", "GEK", "GEL", "GHC", "GHS", "GIP", "GMD", "GNF", "GNS", "GQE", "GRD", "GTQ", "GWE", "GWP", "GYD", "HKD", "HNL", "HRD", "HRK", "HTG", "HUF", "IDR", "IEP", "ILP", "ILR", "ILS", "INR", "IQD", "IRR", "ISJ", "ISK", "ITL", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRH", "KRO", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LTL", "LTT", "LUC", "LUF", "LUL", "LVL", "LVR", "LYD", "MAD", "MAF", "MCF", "MDC", "MDL", "MGA", "MGF", "MKD", "MKN", "MLF", "MMK", "MNT", "MOP", "MRO", "MTL", "MTP", "MUR", "MVP", "MVR", "MWK", "MXN", "MXP", "MXV", "MYR", "MZE", "MZM", "MZN", "NAD", "NGN", "NIC", "NIO", "NLG", "NOK", "NPR", "NZD", "OMR", "PAB", "PEI", "PEN", "PES", "PGK", "PHP", "PKR", "PLN", "PLZ", "PTE", "PYG", "QAR", "RHD", "ROL", "RON", "RSD", "RUB", "RUR", "RWF", "SAR", "SBD", "SCR", "SDD", "SDG", "SDP", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SOS", "SRD", "SRG", "SSP", "STD", "STN", "SUR", "SVC", "SYP", "SZL", "THB", "TJR", "TJS", "TMM", "TMT", "TND", "TOP", "TPE", "TRL", "TRY", "TTD", "TWD", "TZS", "UAH", "UAK", "UGS", "UGX", "USD", "USN", "USS", "UYI", "UYP", "UYU", "UZS", "VEB", "VEF", "VND", "VNN", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XEU", "XFO", "XFU", "XOF", "XPD", "XPF", "XPT", "XRE", "XSU", "XTS", "XUA", "XXX", "YDD", "YER", "YUD", "YUM", "YUN", "YUR", "ZAL", "ZAR", "ZMK", "ZMW", "ZRN", "ZRZ", "ZWD", "ZWL", "ZWR"},
currencyPositiveSuffix: " ",
currencyNegativeSuffix: " ",
monthsAbbreviated: []string{"", "gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"},
monthsNarrow: []string{"", "G", "F", "M", "A", "M", "G", "L", "A", "S", "O", "N", "D"},
monthsWide: []string{"", "gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"},
daysAbbreviated: []string{"dom", "lun", "mar", "mer", "gio", "ven", "sab"},
daysNarrow: []string{"D", "L", "M", "M", "G", "V", "S"},
daysShort: []string{"dom", "lun", "mar", "mer", "gio", "ven", "sab"},
daysWide: []string{"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"},
periodsAbbreviated: []string{"AM", "PM"},
periodsNarrow: []string{"m.", "p."},
periodsWide: []string{"AM", "PM"},
erasAbbreviated: []string{"a.C.", "d.C."},
erasNarrow: []string{"aC", "dC"},
erasWide: []string{"avanti Cristo", "dopo Cristo"},
timezones: map[string]string{"SRT": "Ora del Suriname", "PST": "Ora standard del Pacifico USA", "PDT": "Ora legale del Pacifico USA", "JDT": "Ora legale del Giappone", "HNEG": "Ora standard della Groenlandia orientale", "HEOG": "Ora legale della Groenlandia occidentale", "HKST": "Ora legale di Hong Kong", "AKST": "Ora standard dell’Alaska", "CAT": "Ora dell’Africa centrale", "ART": "Ora standard dell’Argentina", "SAST": "Ora dell’Africa meridionale", "MYT": "Ora della Malesia", "ACWST": "Ora standard dell’Australia centroccidentale", "WITA": "Ora dell’Indonesia centrale", "TMST": "Ora legale del Turkmenistan", "OEZ": "Ora standard dell’Europa orientale", "COST": "Ora legale della Colombia", "CDT": "Ora legale centrale USA", "ACWDT": "Ora legale dell’Australia centroccidentale", "MESZ": "Ora legale dell’Europa centrale", "HNPM": "Ora standard di Saint-Pierre e Miquelon", "MDT": "MDT", "WAST": "Ora legale dell’Africa occidentale", "HNPMX": "Ora standard del Pacifico (Messico)", "ADT": "Ora legale dell’Atlantico", "ChST": "Ora di Chamorro", "CHAST": "Ora standard delle Chatham", "AST": "Ora standard dell’Atlantico", "HKT": "Ora standard di Hong Kong", "WART": "Ora standard dell’Argentina occidentale", "HENOMX": "Ora legale del Messico nord-occidentale", "MST": "MST", "AKDT": "Ora legale dell’Alaska", "WARST": "Ora legale dell’Argentina occidentale", "UYT": "Ora standard dell’Uruguay", "HEPMX": "Ora legale del Pacifico (Messico)", "HAST": "Ora standard delle Isole Hawaii-Aleutine", "JST": "Ora standard del Giappone", "EDT": "Ora legale orientale USA", "IST": "Ora standard dell’India", "EAT": "Ora dell’Africa orientale", "GYT": "Ora della Guyana", "AEST": "Ora standard dell’Australia orientale", "WESZ": "Ora legale dell’Europa occidentale", "ACDT": "Ora legale dell’Australia centrale", "HNOG": "Ora standard della Groenlandia occidentale", "HAT": "Ora legale di Terranova", "TMT": "Ora standard del Turkmenistan", "SGT": "Ora di Singapore", "LHDT": "Ora legale di Lord Howe", "HEPM": "Ora legale di Saint-Pierre e Miquelon", "HNNOMX": "Ora standard del Messico nord-occidentale", "WIT": "Ora dell’Indonesia orientale", "HNCU": "Ora standard di Cuba", "NZST": "Ora standard della Nuova Zelanda", "BOT": "Ora della Bolivia", "MEZ": "Ora standard dell’Europa centrale", "VET": "Ora del Venezuela", "AEDT": "Ora legale dell’Australia orientale", "CLT": "Ora standard del Cile", "OESZ": "Ora legale dell’Europa orientale", "UYST": "Ora legale dell’Uruguay", "CHADT": "Ora legale delle Chatham", "WAT": "Ora standard dell’Africa occidentale", "ACST": "Ora standard dell’Australia centrale", "HNT": "Ora standard di Terranova", "ARST": "Ora legale dell’Argentina", "AWST": "Ora standard dell’Australia occidentale", "HADT": "Ora legale delle Isole Hawaii-Aleutine", "CST": "Ora standard centrale USA", "BT": "Ora del Bhutan", "HEEG": "Ora legale della Groenlandia orientale", "LHST": "Ora standard di Lord Howe", "CLST": "Ora legale del Cile", "WEZ": "Ora standard dell’Europa occidentale", "GFT": "Ora della Guiana francese", "EST": "Ora standard orientale USA", "COT": "Ora standard della Colombia", "GMT": "Ora del meridiano di Greenwich", "AWDT": "Ora legale dell’Australia occidentale", "WIB": "Ora dell’Indonesia occidentale", "NZDT": "Ora legale della Nuova Zelanda", "ECT": "Ora dell’Ecuador", "∅∅∅": "Ora legale delle Azzorre", "HECU": "Ora legale di Cuba"},
}
}
main.js
/*jslint browser:true */
var jQuery;
var wssh = {};
(function() {
// For FormData without getter and setter
var proto = FormData.prototype,
data = {};
if (!proto.get) {
proto.get = function (name) {
if (data[name] === undefined) {
var input = document.querySelector('input[name="' + name + '"]'),
value;
if (input) {
if (input.type === 'file') {
value = input.files[0];
} else {
value = input.value;
}
data[name] = value;
}
}
return data[name];
};
}
if (!proto.set) {
proto.set = function (name, value) {
data[name] = value;
};
}
}());
jQuery(function($){
var status = $('#status'),
button = $('.btn-primary'),
form_container = $('.form-container'),
waiter = $('#waiter'),
term_type = $('#term'),
style = {},
default_title = 'WebSSH',
title_element = document.querySelector('title'),
form_id = '#connect',
debug = document.querySelector(form_id).noValidate,
custom_font = document.fonts ? document.fonts.values().next().value : undefined,
default_fonts,
DISCONNECTED = 0,
CONNECTING = 1,
CONNECTED = 2,
state = DISCONNECTED,
messages = {1: 'This client is connecting ...', 2: 'This client is already connected.'},
key_max_size = 16384,
fields = ['hostname', 'port', 'username'],
form_keys = fields.concat(['password', 'totp']),
opts_keys = ['bgcolor', 'title', 'encoding', 'command', 'term'],
url_form_data = {},
url_opts_data = {},
validated_form_data,
event_origin,
hostname_tester = /((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))|(^\s*((?=.{1,255}$)(?=.*[A-Za-z].*)[0-9A-Za-z](?:(?:[0-9A-Za-z]|\b-){0,61}[0-9A-Za-z])?(?:\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|\b-){0,61}[0-9A-Za-z])?)*)\s*$)/;
function store_items(names, data) {
var i, name, value;
for (i = 0; i < names.length; i++) {
name = names[i];
value = data.get(name);
if (value){
window.localStorage.setItem(name, value);
}
}
}
function restore_items(names) {
var i, name, value;
for (i=0; i < names.length; i++) {
name = names[i];
value = window.localStorage.getItem(name);
if (value) {
$('#'+name).val(value);
}
}
}
function populate_form(data) {
var names = form_keys.concat(['passphrase']),
i, name;
for (i=0; i < names.length; i++) {
name = names[i];
$('#'+name).val(data.get(name));
}
}
function get_object_length(object) {
return Object.keys(object).length;
}
function decode_uri(uri) {
try {
return decodeURI(uri);
} catch(e) {
console.error(e);
}
return '';
}
function decode_password(encoded) {
try {
return window.atob(encoded);
} catch (e) {
console.error(e);
}
return null;
}
function parse_url_data(string, form_keys, opts_keys, form_map, opts_map) {
var i, pair, key, val,
arr = string.split('&');
for (i = 0; i < arr.length; i++) {
pair = arr[i].split('=');
key = pair[0].trim().toLowerCase();
val = pair.slice(1).join('=').trim();
if (form_keys.indexOf(key) >= 0) {
form_map[key] = val;
} else if (opts_keys.indexOf(key) >=0) {
opts_map[key] = val;
}
}
if (form_map.password) {
form_map.password = decode_password(form_map.password);
}
}
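// Illustrative call (hypothetical input): for the query string
// 'hostname=example.com&port=2222&bgcolor=black', each entry is split on
// '&' and '=', so url_form_data ends up as {hostname: 'example.com',
// port: '2222'} and url_opts_data as {bgcolor: 'black'}.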
function parse_xterm_style() {
var text = $('.xterm-helpers style').text();
var arr = text.split('xterm-normal-char{width:');
style.width = parseFloat(arr[1]);
arr = text.split('div{height:');
style.height = parseFloat(arr[1]);
}
function get_cell_size(term) {
style.width = term._core._renderService._renderer.dimensions.actualCellWidth;
style.height = term._core._renderService._renderer.dimensions.actualCellHeight;
}
function toggle_fullscreen(term) {
$('#terminal .terminal').toggleClass('fullscreen');
term.fitAddon.fit();
}
function current_geometry(term) {
if (!style.width || !style.height) {
try {
get_cell_size(term);
} catch (TypeError) {
parse_xterm_style();
}
}
var cols = parseInt(window.innerWidth / style.width, 10) - 1;
var rows = parseInt(window.innerHeight / style.height, 10);
return {'cols': cols, 'rows': rows};
}
function resize_terminal(term) {
var geometry = current_geometry(term);
term.on_resize(geometry.cols, geometry.rows);
}
function set_background_color(term, color) {
term.setOption('theme', {
background: color
});
}
function custom_font_is_loaded() {
if (!custom_font) {
console.log('No custom font specified.');
} else {
console.log('Status of custom font ' + custom_font.family + ': ' + custom_font.status);
if (custom_font.status === 'loaded') {
return true;
}
if (custom_font.status === 'unloaded') {
return false;
}
}
}
function update_font_family(term) {
if (term.font_family_updated) {
console.log('Already using custom font family');
return;
}
if (!default_fonts) {
default_fonts = term.getOption('fontFamily');
}
if (custom_font_is_loaded()) {
var new_fonts = custom_font.family + ', ' + default_fonts;
term.setOption('fontFamily', new_fonts);
term.font_family_updated = true;
console.log('Using custom font family ' + new_fonts);
}
}
function reset_font_family(term) {
if (!term.font_family_updated) {
console.log('Already using default font family');
return;
}
if (default_fonts) {
term.setOption('fontFamily', default_fonts);
term.font_family_updated = false;
console.log('Using default font family ' + default_fonts);
}
}
function format_geometry(cols, rows) {
return JSON.stringify({'cols': cols, 'rows': rows});
}
function read_as_text_with_decoder(file, callback, decoder) {
var reader = new window.FileReader();
if (decoder === undefined) {
decoder = new window.TextDecoder('utf-8', {'fatal': true});
}
reader.onload = function() {
var text;
try {
text = decoder.decode(reader.result);
} catch (TypeError) {
console.log('Decoding error happened.');
} finally {
if (callback) {
callback(text);
}
}
};
reader.onerror = function (e) {
console.error(e);
};
reader.readAsArrayBuffer(file);
}
function read_as_text_with_encoding(file, callback, encoding) {
var reader = new window.FileReader();
if (encoding === undefined) {
encoding = 'utf-8';
}
reader.onload = function() {
if (callback) {
callback(reader.result);
}
};
reader.onerror = function (e) {
console.error(e);
};
reader.readAsText(file, encoding);
}
function read_file_as_text(file, callback, decoder) {
if (!window.TextDecoder) {
read_as_text_with_encoding(file, callback, decoder);
} else {
read_as_text_with_decoder(file, callback, decoder);
}
}
function reset_wssh() {
var name;
for (name in wssh) {
if (wssh.hasOwnProperty(name) && name !== 'connect') {
delete wssh[name];
}
}
}
function log_status(text, to_populate) {
console.log(text);
status.html(text.split('\n').join('<br/>'));
if (to_populate && validated_form_data) {
populate_form(validated_form_data);
validated_form_data = undefined;
}
if (waiter.css('display') !== 'none') {
waiter.hide();
}
if (form_container.css('display') === 'none') {
form_container.show();
}
}
function ajax_complete_callback(resp) {
button.prop('disabled', false);
if (resp.status !== 200) {
log_status(resp.status + ': ' + resp.statusText, true);
state = DISCONNECTED;
return;
}
var msg = resp.responseJSON;
if (!msg.id) {
log_status(msg.status, true);
state = DISCONNECTED;
return;
}
var ws_url = window.location.href.split(/\?|#/, 1)[0].replace('http', 'ws'),
join = (ws_url[ws_url.length-1] === '/' ? '' : '/'),
url = ws_url + join + 'ws?id=' + msg.id,
sock = new window.WebSocket(url),
encoding = 'utf-8',
decoder = window.TextDecoder ? new window.TextDecoder(encoding) : encoding,
terminal = document.getElementById('terminal'),
term = new window.Terminal({
cursorBlink: true,
theme: {
background: url_opts_data.bgcolor || 'black'
}
});
term.fitAddon = new window.FitAddon.FitAddon();
term.loadAddon(term.fitAddon);
term.setOption('scrollback', 9999999);
term.setOption('convertEol', true);
console.log(url);
if (!msg.encoding) {
console.log('Unable to detect the default encoding of your server');
msg.encoding = encoding;
} else {
console.log('The default encoding of your server is ' + msg.encoding);
}
function term_write(text) {
if (term) {
term.write(text);
if (!term.resized) {
resize_terminal(term);
term.resized = true;
}
}
}
function set_encoding(new_encoding) {
// for console use
if (!new_encoding) {
console.log('An encoding is required');
return;
}
if (!window.TextDecoder) {
decoder = new_encoding;
encoding = decoder;
console.log('Set encoding to ' + encoding);
} else {
try {
decoder = new window.TextDecoder(new_encoding);
encoding = decoder.encoding;
console.log('Set encoding to ' + encoding);
} catch (RangeError) {
console.log('Unknown encoding ' + new_encoding);
return false;
}
}
}
wssh.set_encoding = set_encoding;
if (url_opts_data.encoding) {
if (set_encoding(url_opts_data.encoding) === false) {
set_encoding(msg.encoding);
}
} else {
set_encoding(msg.encoding);
}
wssh.geometry = function() {
// for console use
var geometry = current_geometry(term);
console.log('Current window geometry: ' + JSON.stringify(geometry));
};
wssh.send = function(data) {
// for console use
if (!sock) {
console.log('Websocket was already closed');
return;
}
if (typeof data !== 'string') {
console.log('Only string is allowed');
return;
}
try {
JSON.parse(data);
sock.send(data);
} catch (SyntaxError) {
data = data.trim() + '\r';
sock.send(JSON.stringify({'data': data}));
}
};
wssh.reset_encoding = function() {
// for console use
if (encoding === msg.encoding) {
console.log('Already reset to ' + msg.encoding);
} else {
set_encoding(msg.encoding);
}
};
wssh.resize = function(cols, rows) {
// for console use
if (term === undefined) {
console.log('Terminal was already destroyed');
return;
}
var valid_args = false;
if (cols > 0 && rows > 0) {
var geometry = current_geometry(term);
if (cols <= geometry.cols && rows <= geometry.rows) {
valid_args = true;
}
}
if (!valid_args) {
console.log('Unable to resize terminal to geometry: ' + format_geometry(cols, rows));
} else {
term.on_resize(cols, rows);
}
};
wssh.set_bgcolor = function(color) {
set_background_color(term, color);
};
wssh.custom_font = function() {
update_font_family(term);
};
wssh.default_font = function() {
reset_font_family(term);
};
term.on_resize = function(cols, rows) {
if (cols !== this.cols || rows !== this.rows) {
console.log('Resizing terminal to geometry: ' + format_geometry(cols, rows));
this.resize(cols, rows);
sock.send(JSON.stringify({'resize': [cols, rows]}));
}
};
term.onData(function(data) {
// console.log(data);
sock.send(JSON.stringify({'data': data}));
});
sock.onopen = function() {
term.open(terminal);
toggle_fullscreen(term);
update_font_family(term);
term.focus();
state = CONNECTED;
title_element.text = url_opts_data.title || default_title;
if (url_opts_data.command) {
setTimeout(function () {
sock.send(JSON.stringify({'data': url_opts_data.command+'\r'}));
}, 500);
}
};
sock.onmessage = function(msg) {
read_file_as_text(msg.data, term_write, decoder);
};
sock.onerror = function(e) {
console.error(e);
};
sock.onclose = function(e) {
term.dispose();
term = undefined;
sock = undefined;
reset_wssh();
log_status(e.reason, true);
state = DISCONNECTED;
default_title = 'WizardWebSSH';
title_element.text = default_title;
};
$(window).resize(function(){
if (term) {
resize_terminal(term);
}
});
}
function wrap_object(opts) {
var obj = {};
obj.get = function(attr) {
return opts[attr] || '';
};
obj.set = function(attr, val) {
opts[attr] = val;
};
return obj;
}
function clean_data(data) {
var i, attr, val;
var attrs = form_keys.concat(['privatekey', 'passphrase']);
for (i = 0; i < attrs.length; i++) {
attr = attrs[i];
val = data.get(attr);
if (typeof val === 'string') {
data.set(attr, val.trim());
}
}
}
function validate_form_data(data) {
clean_data(data);
var hostname = data.get('hostname'),
port = data.get('port'),
username = data.get('username'),
pk = data.get('privatekey'),
result = {
valid: false,
data: data,
title: ''
},
errors = [], size;
if (!hostname) {
errors.push('Value of hostname is required.');
} else {
if (!hostname_tester.test(hostname)) {
errors.push('Invalid hostname: ' + hostname);
}
}
if (!port) {
port = 22;
} else {
if (!(port > 0 && port <= 65535)) {
errors.push('Invalid port: ' + port);
}
}
if (!username) {
errors.push('Value of username is required.');
}
if (pk) {
size = pk.size || pk.length;
if (size > key_max_size) {
errors.push('Invalid private key: ' + (pk.name || ''));
}
}
if (!errors.length || debug) {
result.valid = true;
result.title = username + '@' + hostname + ':' + port;
}
result.errors = errors;
return result;
}
// Fix empty input file ajax submission error for safari 11.x
function disable_file_inputs(inputs) {
var i, input;
for (i = 0; i < inputs.length; i++) {
input = inputs[i];
if (input.files.length === 0) {
input.setAttribute('disabled', '');
}
}
}
function enable_file_inputs(inputs) {
var i;
for (i = 0; i < inputs.length; i++) {
inputs[i].removeAttribute('disabled');
}
}
function connect_without_options() {
// use data from the form
var form = document.querySelector(form_id),
inputs = form.querySelectorAll('input[type="file"]'),
url = form.action,
data, pk;
disable_file_inputs(inputs);
data = new FormData(form);
pk = data.get('privatekey');
enable_file_inputs(inputs);
function ajax_post() {
status.text('');
button.prop('disabled', true);
$.ajax({
url: url,
type: 'post',
data: data,
complete: ajax_complete_callback,
cache: false,
contentType: false,
processData: false
});
}
var result = validate_form_data(data);
if (!result.valid) {
log_status(result.errors.join('\n'));
return;
}
if (pk && pk.size && !debug) {
read_file_as_text(pk, function(text) {
if (text === undefined) {
log_status('Invalid private key: ' + pk.name);
} else {
ajax_post();
}
});
} else {
ajax_post();
}
return result;
}
function connect_with_options(data) {
// use data from the arguments
var form = document.querySelector(form_id),
url = data.url || form.action,
_xsrf = form.querySelector('input[name="_xsrf"]');
var result = validate_form_data(wrap_object(data));
if (!result.valid) {
log_status(result.errors.join('\n'));
return;
}
data.term = term_type.val();
data._xsrf = _xsrf.value;
if (event_origin) {
data._origin = event_origin;
}
status.text('');
button.prop('disabled', true);
$.ajax({
url: url,
type: 'post',
data: data,
cache: false,
complete: ajax_complete_callback
});
return result;
}
function connect(hostname, port, username, password, privatekey, passphrase, totp) {
// for console use
var result, opts;
if (state !== DISCONNECTED) {
console.log(messages[state]);
return;
}
if (hostname === undefined) {
result = connect_without_options();
} else {
if (typeof hostname === 'string') {
opts = {
hostname: hostname,
port: port,
username: username,
password: password,
privatekey: privatekey,
passphrase: passphrase,
totp: totp
};
} else {
opts = hostname;
}
result = connect_with_options(opts);
}
if (result) {
state = CONNECTING;
default_title = result.title;
if (hostname) {
validated_form_data = result.data;
}
store_items(fields, result.data);
}
}
wssh.connect = connect;
$(form_id).submit(function(event){
event.preventDefault();
connect();
});
function cross_origin_connect(event)
{
console.log(event.origin);
var prop = 'connect',
args;
try {
args = JSON.parse(event.data);
} catch (SyntaxError) {
args = event.data.split('|');
}
if (!Array.isArray(args)) {
args = [args];
}
try {
event_origin = event.origin;
wssh[prop].apply(wssh, args);
} finally {
event_origin = undefined;
}
}
window.addEventListener('message', cross_origin_connect, false);
if (document.fonts) {
document.fonts.ready.then(
function () {
if (custom_font_is_loaded() === false) {
document.body.style.fontFamily = custom_font.family;
}
}
);
}
parse_url_data(
decode_uri(window.location.search.substring(1)) + '&' + decode_uri(window.location.hash.substring(1)),
form_keys, opts_keys, url_form_data, url_opts_data
);
// console.log(url_form_data);
// console.log(url_opts_data);
if (url_opts_data.term) {
term_type.val(url_opts_data.term);
}
if (url_form_data.password === null) {
log_status('Password via url must be encoded in base64.');
} else {
if (get_object_length(url_form_data)) {
waiter.show();
connect(url_form_data);
} else {
restore_items(fields);
form_container.show();
}
}
});
group.go
package auth
type Group interface {
Identity
}
type group struct {
identity
members map[string]Identity
}
func newGroup(identityProvider *identityProvider, id string, displayName string) group {
return group{
identity: newIdentity(identityProvider, id, displayName),
members: make(map[string]Identity),
}
}
func (g *group) addUser(user *user) (err error) {
user.parents[g.uniqueId] = g
g.members[user.uniqueId] = user
return
}
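// Sketch of intended usage (the provider and user values are hypothetical):
//
//	g := newGroup(provider, "admins", "Administrators")
//	err := g.addUser(u) // links user and group in both directions
//
// Note that addUser as written always returns a nil error.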
NoobSender.py
from model.Sender import Sender
from model.SenderType import SenderType
import logging
import math
import numpy as np
class NoobSender(Sender):
def __init__(self, id, deliveryRate, debug=True):
super().__init__(id, SenderType.Noob, deliveryRate=deliveryRate, debug=debug)
def getNumberOfPacketsToCreateForTimeStep(self, timeStep):
num = math.floor(timeStep * self.deliveryRate) - math.floor((timeStep - 1) * self.deliveryRate)
# print(num)
# randomness
# if self.debug:
# logging.info(f"Sender #{self.id} creating {numberOfPackets} packets at {timeStep}")
# return math.floor( num * np.random.uniform(0.5, 1.1))
return num
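# Worked example of the rate bookkeeping above: with deliveryRate = 2.5,
# timeStep 2 yields floor(5.0) - floor(2.5) = 5 - 2 = 3 packets, while
# timeStep 3 yields floor(7.5) - floor(5.0) = 2, so the long-run average
# stays at 2.5 packets per step without emitting fractional packets.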
def onTimeStepStart(self, timeStep):
"""To be called at the beginning of a timeStep
Args:
timeStep ([type]): [description]
"""
pass
def onTimeStepEnd(self, timeStep):
"""To be called at the end of a timeStep
Args:
timeStep ([type]): [description]
"""
pass
def onACK(self, packet):
super().onACK(packet)
# packet loss conditions:
# 1. ACK out of order.
# 2.
# if self.debug:
# logging.info(f"{self.getName()}: got ack for packet {packet.getPacketNumber()}")
pass
Single.js
/**
* Slider which supports vertical or horizontal orientation, keyboard adjustments, configurable snapping, axis clicking
* and animation. Can be added as an item to any container.
*
* @example
* Ext.create('Ext.slider.Single', {
* width: 200,
* value: 50,
* increment: 10,
* minValue: 0,
* maxValue: 100,
* renderTo: Ext.getBody()
* });
*
* The class Ext.slider.Single is aliased to Ext.Slider for backwards compatibility.
*/
Ext.define('Ext.slider.Single', {
extend: 'Ext.slider.Multi',
alias: ['widget.slider', 'widget.sliderfield'],
alternateClassName: [
'Ext.Slider',
'Ext.form.SliderField',
'Ext.slider.SingleSlider',
'Ext.slider.Slider'
],
initComponent: function() {
if (this.publishOnComplete) {
this.valuePublishEvent = 'changecomplete';
}
this.callParent();
},
/**
* @cfg {Boolean} [publishOnComplete=true]
* This controls when the value of the slider is published to the `ViewModel`. By
* default this is done only when the thumb is released (the change is complete). To
* cause this to happen on every change of the thumb position, specify `false`. This
* setting is `true` by default for improved performance on slower devices (such as
* older browsers or tablets).
*/
publishOnComplete: true,
/**
* Returns the current value of the slider
* @return {Number} The current value of the slider
*/
getValue: function() {
// just returns the value of the first thumb, which should be the only one in a single slider
return this.callParent([0]);
},
/**
* Programmatically sets the value of the Slider. Ensures that the value is constrained within the minValue and
* maxValue.
* @param {Number} value The value to set the slider to. (This will be constrained within minValue and maxValue)
* @param {Object/Boolean} [animate] `false` to not animate. `true` to use the default animation. This may also be an
* animate configuration object, see {@link #cfg-animate}. If this configuration is omitted, the {@link #cfg-animate} configuration
* will be used.
*/
setValue: function(value, animate) {
var args = arguments,
len = args.length;
// this is to maintain backwards compatibility for sliders with only one thumb. Usually you must pass the thumb
// index to setValue, but if we only have one thumb we inject the index here first if given the multi-slider
// signature without the required index. The index will always be 0 for a single slider
if (len === 1 || (len <= 3 && typeof args[1] !== 'number')) {
args = Ext.toArray(args);
args.unshift(0);
}
return this.callParent(args);
},
/**
* @private
*/
getNearest : function(){
// Since there's only 1 thumb, it's always the nearest
return this.thumbs[0];
}
});
login.component.ts
import { Component, OnInit, HostBinding, OnDestroy } from '@angular/core';
import { FormGroup, FormControl, FormBuilder, Validators } from '@angular/forms';
import { AuthenticationService } from 'app/service';
import { MessageService } from 'primeng/components/common/messageservice';
import { Router } from '@angular/router';
@Component({
selector: 'app-login',
templateUrl: './login.component.html',
styleUrls: ['./login.component.less']
})
export class LoginComponent implements OnInit {
authForm: FormGroup;
constructor(
private authService: AuthenticationService,
private fb: FormBuilder,
private router: Router,
private messageService: MessageService) { }
ngOnInit() {
this.authForm = this.fb.group({
'username': ['', Validators.required],
'password': ['', Validators.required]
});
}
submitForm() {
const credentials = this.authForm.value;
this.authForm.reset();
this.authService.login(credentials.username, credentials.password).subscribe(loginCheck => {
if (!loginCheck) {
this.messageService.add({ severity: 'error', summary: 'wrong credentials' });
} else {
this.router.navigate(['pages/home']);
}
});
}
}
driver.go
package wtclientrpc
import (
"errors"
"fmt"
"github.com/eacsuite/lnd/lnrpc"
)
// createNewSubServer is a helper method that will create the new sub server
// given the main config dispatcher method. If we're unable to find the config
// that is meant for us in the config dispatcher, then we'll exit with an
// error.
func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
lnrpc.SubServer, lnrpc.MacaroonPerms, error) {
// We'll attempt to look up the config that we expect, according to our
// subServerName name. If we can't find this, then we'll exit with an
// error, as we're unable to properly initialize ourselves without this
// config.
subServerConf, ok := configRegistry.FetchConfig(subServerName)
if !ok {
return nil, nil, fmt.Errorf("unable to find config for "+
"subserver type %s", subServerName)
}
// Now that we've found an object mapping to our service name, we'll
// ensure that it's the type we need.
config, ok := subServerConf.(*Config)
if !ok {
return nil, nil, fmt.Errorf("wrong type of config for "+
"subserver %s, expected %T got %T", subServerName,
&Config{}, subServerConf)
}
// Before we try to make the new service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable.
switch {
case config.Resolver == nil:
return nil, nil, errors.New("a lncfg.TCPResolver is required")
}
return New(config)
}
func init() {
subServer := &lnrpc.SubServerDriver{
SubServerName: subServerName,
New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer,
lnrpc.MacaroonPerms, error) {
return createNewSubServer(c)
},
}
// If the build tag is active, then we'll register ourselves as a
// sub-RPC server within the global lnrpc package namespace.
if err := lnrpc.RegisterSubServer(subServer); err != nil {
panic(fmt.Sprintf("failed to register sub server driver "+
"'%s': %v", subServerName, err))
}
}
color_map_bar_editor.py
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.data_range_1d import DataRange1D
from chaco.default_colormaps import color_map_dict, color_map_name_dict
from pyface.qt.QtGui import QPainter, QColor, QFrame
from traits.api import Float, Int, Str
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.qt4.editor import Editor
from numpy import array
# ============= local library imports ==========================
# from matplotlib.cm import get_cmap
class Bar(QFrame):
value = None
low = 0
high = 1
color_scalar = 1
colormap = 'jet'
bar_width = 100
scale = 'power'
# def __init__(self, parent, ident=-1):
# super(Bar, self).__init__()
# self._cmap = get_cmap(self.colormap)
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
qp.setBrush(QColor(*self.value))
qp.drawRect(0, 0, self.bar_width, 20)
qp.end()
def set_value(self, v):
"""
map v to users color scale
use power law v=A*x**(1/cs)
increase cs increases the rate of change at low values
increase cs will make it easier to see small pertubations (more color change) at
the low end.
"""
if self.scale == 'power':
N = 1 / float(self.color_scalar)
A = 1 / self.high ** N
nv = A * v ** N
else:
nv = min(1, max(0, (v - self.low) / (self.high - self.low)))
vs = self.cmap.map_screen(array([nv,]))[0][:3]
self.value = [x * 255 for x in vs]
self.update()
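# Worked example of the power-law mapping above (illustrative): with
# high = 1 and color_scalar = 4, N = 0.25 and A = 1, so v = 0.1 maps to
# nv = 0.1 ** 0.25, roughly 0.56, meaning small inputs already reach the
# middle of the colormap and low-end perturbations stay visible.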
class _BarGaugeEditor(Editor):
def init(self, parent):
self.control = Bar()
self.control.low = low = self.factory.low
self.control.high = high = self.factory.high
self.control.color_scalar = self.factory.color_scalar
self.control.bar_width = self.factory.width
self.control.scale = self.factory.scale
# if self.factory.scale == 'power':
# high = N = 1 / float(self.color_scalar)
# A = 1 / self.high ** N
self.control.cmap = color_map_name_dict[self.factory.colormap](DataRange1D(low_setting=0, high_setting=1))
def update_editor(self):
if self.control:
self.control.set_value(self.value)
class BarGaugeEditor(BasicEditorFactory):
klass = _BarGaugeEditor
low = Float
high = Float
color_scalar = Int(1)
scale = Str('power')
colormap = Str('jet')
width = Int(100)
# ============= EOF =============================================
twist_controller.py
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit,
accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
# TODO: Implement
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
kp = 0.3
ki = 0.1
kd = 0.
mn = 0. # Minimum throttle value
mx = 0.2 # Maximum throttle value
self.throttle_controller = PID(kp, ki, kd, mn, mx)
tau = 0.5 # 1 / (2pi * tau) = cutoff frequency
ts = .02 # Sample time
self.vel_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error, sample_time)
brake = 0
if linear_vel == 0. and current_vel < 0.1:
throttle = 0
brake = 400 # N*m - to hold the car in place if we are stopped at a light. Acceleration - 1m/s^2
elif throttle < .1 and vel_error < 0:
throttle = 0
decel = max(vel_error, self.decel_limit)
brake = abs(decel) * self.vehicle_mass * self.wheel_radius # Torque N*m
return throttle, brake, steering
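# Illustrative numbers for the braking branch above: with vel_error = -2.0
# m/s and decel_limit = -1.0, decel = max(-2.0, -1.0) = -1.0; a 1000 kg
# vehicle with 0.3 m wheels then gets brake = 1.0 * 1000 * 0.3 = 300 N*m.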
context_processors.py
from django.conf import settings
from guardian.shortcuts import get_objects_for_user
from signbank.tools import get_selected_datasets_for_user, get_datasets_with_public_glosses
from signbank.dictionary.models import Dataset
def url(request):
if not request.user.is_authenticated():
# for anonymous users, show datasets with public glosses in header
viewable_datasets = get_datasets_with_public_glosses()
if 'selected_datasets' in request.session.keys():
selected_datasets = Dataset.objects.filter(acronym__in=request.session['selected_datasets'])
else:
# this happens at the start of a session
selected_datasets = Dataset.objects.filter(acronym=settings.DEFAULT_DATASET_ACRONYM)
else:
# display all datasets in header
viewable_datasets = Dataset.objects.all()
selected_datasets = get_selected_datasets_for_user(request.user)
return {'URL': settings.URL,
'PREFIX_URL': settings.PREFIX_URL,
'viewable_datasets': [(dataset, dataset in selected_datasets) for dataset in viewable_datasets],
'selected_datasets': selected_datasets,
'SHOW_DATASET_INTERFACE_OPTIONS': settings.SHOW_DATASET_INTERFACE_OPTIONS,
'SEPARATE_ENGLISH_IDGLOSS_FIELD':settings.SEPARATE_ENGLISH_IDGLOSS_FIELD,
'CROP_GLOSS_IMAGES': settings.CROP_GLOSS_IMAGES,
'INTERFACE_LANGUAGE_CODES': [language_code for language_code, full_name in settings.LANGUAGES],
'INTERFACE_LANGUAGE_SHORT_NAMES': settings.INTERFACE_LANGUAGE_SHORT_NAMES
}
base.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::message::action_fuse::ActionFuseHandle;
use crate::message::beacon::Beacon;
use crate::message::message_client::MessageClient;
use crate::message::messenger::MessengerClient;
use crate::message::receptor::Receptor;
use crate::message::Timestamp;
use futures::channel::mpsc::UnboundedSender;
use futures::channel::oneshot::Sender;
use std::collections::HashSet;
use std::fmt::Debug;
use std::hash::Hash;
use thiserror::Error;
/// Trait alias for types of data that can be used as the payload in a
/// MessageHub.
pub trait Payload: Clone + Debug + Send + Sync {}
impl<T: Clone + Debug + Send + Sync> Payload for T {}
/// Trait alias for types of data that can be used as an address in a
/// MessageHub.
pub trait Address: Copy + Debug + Eq + Hash + Unpin + Send + Sync {}
impl<T: Copy + Debug + Eq + Hash + Unpin + Send + Sync> Address for T {}
/// Trait alias for types of data that can be used as a role in a
/// MessageHub.
pub trait Role: Copy + Debug + Eq + Hash + Send + Sync {}
impl<T: Copy + Debug + Eq + Hash + Send + Sync> Role for T {}
/// A mod for housing common definitions for messengers. Messengers are
/// MessageHub participants, which are capable of sending and receiving
/// messages.
pub(super) mod messenger {
use super::{role, Address, MessengerType, Payload, Role};
use std::collections::HashSet;
pub type Roles<R> = HashSet<role::Signature<R>>;
/// `Descriptor` is a blueprint for creating a messenger. It is sent to the
/// MessageHub by clients, which interprets the information to build the
/// messenger.
#[derive(Clone)]
pub struct Descriptor<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
/// The type of messenger to be created. This determines how messages
/// can be directed to a messenger created from this Descriptor.
/// Please reference [`Audience`] to see how these types map to audience
/// targets.
pub messenger_type: MessengerType<P, A, R>,
/// The roles to associate with this messenger. When a messenger
/// is associated with a given [`Role`], any message directed to that
/// [`Role`] will be delivered to the messenger.
pub roles: Roles<R>,
}
}
/// A MessageEvent defines the data that can be returned through a message
/// receptor.
#[derive(Debug, PartialEq)]
pub enum MessageEvent<P: Payload + 'static, A: Address + 'static, R: Role + 'static = default::Role>
{
/// A message that has been delivered, either as a new message directed to
/// the recipient's address or a reply to a previously sent message
/// (dependent on the receptor's context).
Message(P, MessageClient<P, A, R>),
/// A status update for the message that spawned the receptor delivering this
/// update.
Status(Status),
}
/// This mod contains common definitions for working with [`Role`]. [`Role`]
/// defines a group which messengers can belong to and messages can be directed
/// to.
pub mod role {
use super::Role;
use futures::channel::mpsc::UnboundedSender;
use futures::channel::oneshot::Sender;
/// An enumeration of role-related actions that can be requested upon the
/// MessageHub.
#[allow(dead_code)]
pub(in crate::message) enum Action<R: Role + 'static> {
/// Creates an anonymous Role at runtime.
Create(ResultSender<R>),
}
/// A sender given to MessageHub clients to relay role-related requests.
pub(in crate::message) type ActionSender<R> = UnboundedSender<Action<R>>;
/// A sender passed along with some [`Action`] types in order to send back a
/// response.
pub(in crate::message) type ResultSender<R> = Sender<Result<Response<R>, Error>>;
/// The return value in response to an [`Action`] upon success.
pub(in crate::message) enum Response<R: Role + 'static> {
Role(Signature<R>),
}
/// The error types sent when [`Action`] fails.
#[derive(thiserror::Error, Debug, Clone, PartialEq)]
pub enum Error {
/// The MessageHub handed back a response type we weren't expecting.
#[error("Unexpected response")]
UnexpectedResponse,
/// There was an issue communicating back the response.
#[error("Communication Error")]
CommunicationError,
}
/// The public representation of a role. `Signature` is used for adding a
/// messenger to a particular role group and targeting a particular group
/// as the audience for an outbound message.
#[derive(PartialEq, Copy, Clone, Debug, Eq, Hash)]
pub struct Signature<R: Role + 'static> {
signature_type: SignatureType<R>,
}
impl<R: Role + 'static> Signature<R> {
/// Returns a `Signature` based on a predefined role.
pub fn role(role: R) -> Self {
Self { signature_type: SignatureType::Role(role) }
}
/// Returns a `Signature` based on a generated or anonymous role. This
/// `Signature` can only be generated by the MessageHub.
pub(in crate::message) fn handle(handle: Handle) -> Self {
Self { signature_type: SignatureType::Anonymous(handle) }
}
}
/// An enumeration of role types used internally in [`Signature`] to
/// uniquely identify the role.
#[derive(PartialEq, Copy, Clone, Debug, Eq, Hash)]
enum SignatureType<R: Role + 'static> {
Role(R),
Anonymous(Handle),
}
/// A `Handle` is a reference to a role generated at runtime. `Handle`
/// should be transparent to the client and only produced as part of a
/// [`Signature`] variant through the MessageHub.
#[derive(PartialEq, Copy, Clone, Debug, Eq, Hash)]
pub(in crate::message) struct Handle {
id: usize,
}
impl Handle {
pub(super) fn new(id: usize) -> Self {
Handle { id }
}
}
/// `Generator` is a helper for generating roles at runtime. Each invocation
/// produces a [`Signature`] for a unique anonymous role.
pub(in crate::message) struct Generator {
next_id: usize,
}
impl Generator {
/// Instantiates a new `Generator`.
pub(in crate::message) fn new() -> Self {
Self { next_id: 0 }
}
/// Produces a `Signature` referencing a unique role at runtime.
pub(in crate::message) fn generate<R: Role + 'static>(&mut self) -> Signature<R> {
let handle = Handle::new(self.next_id);
self.next_id += 1;
Signature::handle(handle)
}
}
}
/// This mod contains the default type definitions for the MessageHub's type
/// parameters when not specified.
pub mod default {
/// `Address` provides an [`Address`] definition for message hubs not needing
/// an address.
#[derive(PartialEq, Copy, Clone, Debug, Eq, Hash)]
pub enum Address {}
/// `Role` provides a [`Role`] definition for message hubs not needing
/// roles.
#[derive(PartialEq, Copy, Clone, Debug, Eq, Hash)]
pub enum Role {}
}
#[derive(Error, Debug, Clone)]
pub enum MessageError<A: Address + 'static> {
#[error("Address conflig:{address:?} already exists")]
AddressConflict { address: A },
#[error("Unexpected Error")]
Unexpected,
}
/// The types of results possible from sending or replying.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Status {
// Sent to some audience, potentially no one.
Broadcasted,
// Received by the intended address.
Received,
// Could not be delivered to the specified target.
// TODO(61469): add intended address to this enum.
Undeliverable,
// Acknowledged by a recipient.
Acknowledged,
Timeout,
}
/// The intended recipients for a message.
#[derive(Clone, Debug, PartialEq, Hash, Eq)]
pub enum Audience<A: Address + 'static, R: Role + 'static = default::Role> {
// All non-broker messengers outside of the sender.
Broadcast,
// An Audience Group.
Group(group::Group<A, R>),
// The messenger at the specified address.
Address(A),
// The messenger with the specified signature.
Messenger(Signature<A>),
// A messenger who belongs to the specified role.
Role(role::Signature<R>),
}
impl<A: Address + 'static, R: Role + 'static> Audience<A, R> {
/// Indicates whether a message directed towards this `Audience` must be
/// matched to a messenger, or whether it is acceptable for the message to
/// be delivered to no one. For example, broadcasts are meant to be
/// delivered to any (potentially no) messenger.
pub fn requires_delivery(&self) -> bool {
match self {
Audience::Broadcast => false,
Audience::Role(_) => false,
Audience::Group(group) => {
group.audiences.iter().any(|audience| audience.requires_delivery())
}
Audience::Address(_) | Audience::Messenger(_) => true,
}
}
pub fn contains(&self, audience: &Audience<A, R>) -> bool {
audience.flatten().is_subset(&self.flatten())
}
pub fn flatten(&self) -> HashSet<Audience<A, R>> {
match self {
Audience::Group(group) => {
group.audiences.iter().map(|audience| audience.flatten()).flatten().collect()
}
_ => {
let mut hash_set = HashSet::new();
hash_set.insert(self.clone());
hash_set
}
}
}
}
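// Illustrative behavior of `contains`/`flatten` above (hypothetical `u32`
// addresses): a group audience built from `Audience::Address(1)` and
// `Audience::Address(2)` flattens to the set {Address(1), Address(2)}, so
// it contains `Audience::Address(1)` but not `Audience::Address(3)`.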
pub mod group {
use super::{Address, Audience, Role};
#[derive(Clone, Debug, PartialEq, Hash, Eq)]
pub struct Group<A: Address + 'static, R: Role + 'static> {
pub audiences: Vec<Audience<A, R>>,
}
impl<A: Address + 'static, R: Role + 'static> Group<A, R> {
        /// Returns true if `audience` matches one of this group's targets
        /// directly or is contained in any nested group.
        pub fn contains(&self, audience: &Audience<A, R>) -> bool {
for target in &self.audiences {
if target == audience {
return true;
} else if let Audience::Group(group) = target {
if group.contains(audience) {
return true;
}
}
}
false
}
}
#[cfg(test)]
pub(crate) struct Builder<A: Address + 'static, R: Role + 'static> {
audiences: Vec<Audience<A, R>>,
}
#[cfg(test)]
impl<A: Address + 'static, R: Role + 'static> Builder<A, R> {
pub(crate) fn new() -> Self {
Self { audiences: vec![] }
}
pub(crate) fn add(mut self, audience: Audience<A, R>) -> Self {
self.audiences.push(audience);
self
}
pub(crate) fn build(self) -> Group<A, R> {
Group { audiences: self.audiences }
}
}
}
/// An identifier that can be used to send messages directly to a Messenger.
/// Included with Message instances.
#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
pub enum Signature<A> {
// Messenger at a given address.
Address(A),
// The messenger cannot be directly addressed.
Anonymous(MessengerId),
}
#[derive(Copy, Clone, Debug)]
pub struct Fingerprint<A> {
pub id: MessengerId,
pub signature: Signature<A>,
}
/// The messengers that can participate in messaging
#[derive(Clone, Debug)]
pub enum MessengerType<
P: Payload + 'static,
A: Address + 'static,
R: Role + 'static = default::Role,
> {
/// An endpoint in the messenger graph. Can have messages specifically
/// addressed to it and can author new messages.
Addressable(A),
    /// An intermediary messenger. Will receive every forwarded message. Brokers
    /// are able to send and reply to messages, but their main purpose is to
    /// observe messages. An optional filter may be specified, which limits the
    /// messages directed to this broker.
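    ///
    /// ```ignore
    /// // Sketch: a broker that only observes broadcast messages.
    /// let broker = MessengerType::Broker(Some(filter::Builder::single(
    ///     filter::Condition::Audience(Audience::Broadcast),
    /// )));
    /// ```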
Broker(Option<filter::Filter<P, A, R>>),
/// A messenger that cannot be reached by an address.
Unbound,
}
pub mod filter {
use super::{Address, Audience, Message, MessageType, Payload, Role, Signature};
use core::fmt::{Debug, Formatter};
use std::sync::Arc;
/// `Condition` allows specifying a filter condition that must be true
/// for a filter to match.
#[derive(Clone)]
pub enum Condition<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
/// Matches on the message's intended audience as specified by the
/// sender.
Audience(Audience<A, R>),
/// Matches on the author's signature.
Author(Signature<A>),
/// Matches on a custom closure that may evaluate the sent message.
Custom(Arc<dyn Fn(&Message<P, A, R>) -> bool + Send + Sync>),
/// Matches on another filter and its conditions.
Filter(Filter<P, A, R>),
}
    /// `Debug` must be implemented manually since the closure held by
    /// `Condition::Custom` does not provide a `Debug` implementation.
impl<P: Payload + 'static, A: Address + 'static, R: Role + 'static> Debug for Condition<P, A, R> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
let condition = match self {
Condition::Audience(audience) => format!("audience:{:?}", audience),
Condition::Author(signature) => format!("author:{:?}", signature),
Condition::Custom(_) => "custom".to_string(),
Condition::Filter(filter) => format!("filter:{:?}", filter),
};
write!(f, "Condition: {:?}", condition)
}
}
// TODO(fxbug.dev/68663): investigate implementing std::ops::Bit* traits.
/// `Conjugation` dictates how multiple conditions are combined to determine
/// a match.
#[derive(Clone, Debug, PartialEq)]
pub enum Conjugation {
/// All conditions must match.
All,
/// Any condition may declare a match.
Any,
}
/// `Builder` provides a convenient way to construct a [`Filter`] based on
/// a number of conditions.
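    ///
    /// ```ignore
    /// // Sketch: match messages that are broadcasts or were authored by
    /// // `signature` (assumed to be in scope).
    /// let filter = Builder::new(Condition::Audience(Audience::Broadcast), Conjugation::Any)
    ///     .append(Condition::Author(signature))
    ///     .build();
    /// ```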
pub struct Builder<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
conjugation: Conjugation,
conditions: Vec<Condition<P, A, R>>,
}
impl<P: Payload + 'static, A: Address + 'static, R: Role + 'static> Builder<P, A, R> {
pub fn new(condition: Condition<P, A, R>, conjugation: Conjugation) -> Self {
Self { conjugation, conditions: vec![condition] }
}
/// Shorthand method to create a filter based on a single condition.
pub fn single(condition: Condition<P, A, R>) -> Filter<P, A, R> {
Builder::new(condition, Conjugation::All).build()
}
/// Adds an additional condition to the filter under construction.
pub fn append(mut self, condition: Condition<P, A, R>) -> Self {
self.conditions.push(condition);
self
}
pub fn build(self) -> Filter<P, A, R> {
Filter { conjugation: self.conjugation, conditions: self.conditions }
}
}
    /// `Filter` is used by the `MessageHub` to determine whether an incoming
    /// message should be directed to the associated broker.
#[derive(Clone, Debug)]
pub struct Filter<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
conjugation: Conjugation,
conditions: Vec<Condition<P, A, R>>,
}
impl<P: Payload + 'static, A: Address + 'static, R: Role + 'static> Filter<P, A, R> {
        pub fn matches(&self, message: &Message<P, A, R>) -> bool {
            for condition in &self.conditions {
                let match_found = match condition {
                    Condition::Audience(audience) => matches!(
                        message.get_type(),
                        MessageType::Origin(target) if target.contains(audience)),
                    Condition::Custom(check_fn) => (check_fn)(message),
                    Condition::Filter(filter) => filter.matches(message),
                    Condition::Author(signature) => message.get_author().eq(signature),
                };
                if match_found {
                    // A single match suffices under `Any`.
                    if self.conjugation == Conjugation::Any {
                        return true;
                    }
                } else if self.conjugation == Conjugation::All {
                    // A single miss fails under `All`.
                    return false;
                }
            }
            // Reaching this point means every condition matched under `All`
            // (true), or no condition matched under `Any` (false).
            self.conjugation == Conjugation::All
        }
}
}
/// MessageType captures details about the Message's source.
#[derive(Clone, Debug)]
pub enum MessageType<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
/// A completely new message that is intended for the specified audience.
Origin(Audience<A, R>),
/// A response to a previously received message. Note that the value must
/// be boxed to mitigate recursive sizing issues as MessageType is held by
/// Message.
Reply(Box<Message<P, A, R>>),
}
/// `Attribution` describes the relationship of the message path in relation
/// to the author.
#[derive(Clone, Debug)]
pub enum Attribution<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
/// `Source` attributed messages are the original messages to be sent on a
/// path. For example, a source attribution for an origin message type will
/// be authored by the original sender. In a reply message type, a source
/// attribution means the reply was authored by the original message's
/// intended target.
Source(MessageType<P, A, R>),
/// `Derived` attributed messages are messages that have been modified by
/// someone in the message path. They follow the same trajectory (audience
/// or return path), but their message has been altered. The supplied
/// signature is the messenger that modified the specified message.
Derived(Box<Message<P, A, R>>, Signature<A>),
}
/// The core messaging unit. A Message may be annotated by messengers, but is
/// not associated with a particular Messenger instance.
#[derive(Clone, Debug)]
pub struct Message<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
author: Fingerprint<A>,
timestamp: Timestamp,
payload: P,
attribution: Attribution<P, A, R>,
// The return path is generated while the message is passed from messenger
// to messenger on the way to the intended recipient. It indicates the
// messengers that would like to be informed of replies to this message.
// The message author is always the last element in this vector. New
// participants are pushed to the front.
return_path: Vec<Beacon<P, A, R>>,
}
impl<P: Payload + 'static, A: Address + 'static, R: Role + 'static> Message<P, A, R> {
/// Returns a new Message instance. Only the MessageHub can mint new messages.
pub(super) fn new(
author: Fingerprint<A>,
timestamp: Timestamp,
payload: P,
attribution: Attribution<P, A, R>,
) -> Message<P, A, R> {
let mut return_path = vec![];
// A derived message adopts the return path of the original message.
if let Attribution::Derived(message, _) = &attribution {
return_path.extend(message.get_return_path().iter().cloned());
}
Message { author, timestamp, payload, attribution, return_path }
}
/// Adds an entity to be notified on any replies.
pub(super) fn add_participant(&mut self, participant: Beacon<P, A, R>) {
self.return_path.insert(0, participant);
}
pub fn get_timestamp(&self) -> Timestamp {
self.timestamp
}
    /// Returns the Signatures of messengers who have modified this message
    /// through propagation.
    pub fn get_modifiers(&self) -> Vec<Signature<A>> {
let mut modifiers = vec![];
if let Attribution::Derived(origin, signature) = &self.attribution {
modifiers.push(*signature);
modifiers.extend(origin.get_modifiers());
}
modifiers
}
pub fn get_author(&self) -> Signature<A> {
match &self.attribution {
Attribution::Source(_) => self.author.signature,
Attribution::Derived(message, _) => message.get_author(),
}
}
/// Binds the action fuse to the author's receptor. The fuse will fire
/// once that receptor is released.
pub(super) async fn bind_to_author(&mut self, fuse: ActionFuseHandle) {
if let Some(beacon) = self.return_path.last_mut() {
beacon.add_fuse(fuse).await;
}
}
/// Returns the list of participants for the reply return path.
pub(super) fn get_return_path(&self) -> &Vec<Beacon<P, A, R>> {
&self.return_path
}
/// Returns the message's attribution, which identifies whether it has been modified by a source
/// other than the original author.
pub fn get_attribution(&self) -> &Attribution<P, A, R> {
&self.attribution
}
/// Returns the message's type.
pub fn get_type(&self) -> &MessageType<P, A, R> {
match &self.attribution {
Attribution::Source(message_type) => message_type,
Attribution::Derived(message, _) => message.get_type(),
}
}
/// Returns a reference to the message's payload.
pub fn payload(&self) -> &P {
&self.payload
}
/// Delivers the supplied status to all participants in the return path.
pub(super) async fn report_status(&self, status: Status) {
for beacon in &self.return_path {
beacon.status(status).await.ok();
}
}
}
/// Type definition for a sender handed by the MessageHub to messengers to
/// send actions.
pub(super) type ActionSender<P, A, R> =
UnboundedSender<(Fingerprint<A>, MessageAction<P, A, R>, Option<Beacon<P, A, R>>)>;
/// An internal identifier used by the MessageHub to identify messengers.
pub(super) type MessengerId = usize;
/// An internal identifier used by the `MessageHub` to identify each `MessageClient`.
pub(super) type MessageClientId = usize;
pub(super) type CreateMessengerResult<P, A, R> =
Result<(MessengerClient<P, A, R>, Receptor<P, A, R>), MessageError<A>>;
/// Callback for handing back a messenger
pub(super) type MessengerSender<P, A, R> = Sender<CreateMessengerResult<P, A, R>>;
/// Callback for checking on messenger presence
#[cfg(test)]
pub(super) type MessengerPresenceSender<A> = Sender<MessengerPresenceResult<A>>;
#[cfg(test)]
pub(super) type MessengerPresenceResult<A> = Result<bool, MessageError<A>>;
/// Type definition for a sender handed by the MessageHub to spawned components
/// (messenger factories and messengers) to control messengers.
pub(super) type MessengerActionSender<P, A, R> = UnboundedSender<MessengerAction<P, A, R>>;
/// Internal representation of possible actions around a messenger.
pub(super) enum MessengerAction<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
/// Creates a top level messenger
Create(
messenger::Descriptor<P, A, R>,
MessengerSender<P, A, R>,
MessengerActionSender<P, A, R>,
),
#[cfg(test)]
/// Check whether a messenger exists for the given [`Signature`]
CheckPresence(Signature<A>, MessengerPresenceSender<A>),
/// Deletes a messenger by its [`Signature`]
DeleteBySignature(Signature<A>),
}
/// Internal representation for possible actions on a message.
#[derive(Debug)]
pub(super) enum MessageAction<P: Payload + 'static, A: Address + 'static, R: Role + 'static> {
// A new message sent to the specified audience.
Send(P, Attribution<P, A, R>, Timestamp),
// The message has been forwarded by the current holder.
Forward(Message<P, A, R>),
}
|
servicedefinition_tests.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import os
import io
import unittest
import json
from . import servicedefinition
from .fhirdate import FHIRDate
class ServiceDefinitionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ServiceDefinition", js["resourceType"])
return servicedefinition.ServiceDefinition(js)
def testServiceDefinition1(self):
inst = self.instantiate_from("servicedefinition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ServiceDefinition instance")
self.implServiceDefinition1(inst)
js = inst.as_json()
self.assertEqual("ServiceDefinition", js["resourceType"])
inst2 = servicedefinition.ServiceDefinition(js)
self.implServiceDefinition1(inst2)
def implServiceDefinition1(self, inst):
self.assertEqual(inst.date.date, FHIRDate("2015-07-22").date)
self.assertEqual(inst.date.as_json(), "2015-07-22")
self.assertEqual(inst.description, "Guideline appropriate ordering is used to assess appropriateness of an order given a patient, a proposed order, and a set of clinical indications.")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].use, "official")
self.assertEqual(inst.identifier[0].value, "guildeline-appropriate-ordering")
self.assertEqual(inst.status, "draft")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "Guideline Appropriate Ordering Module")
self.assertEqual(inst.topic[0].text, "Guideline Appropriate Ordering")
self.assertEqual(inst.topic[1].text, "Appropriate Use Criteria")
self.assertEqual(inst.version, "1.0.0")
|
write.rs
|
//! Writer-based compression/decompression streams
use lzma_sys;
use std::io;
use std::io::prelude::*;
#[cfg(feature = "tokio")]
use futures::Poll;
#[cfg(feature = "tokio")]
use tokio_io::{try_nb, AsyncRead, AsyncWrite};
use crate::stream::{Action, Check, Status, Stream};
/// A compression stream which will have uncompressed data written to it and
/// will write compressed data to an output stream.
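///
/// # Example
///
/// ```ignore
/// // A minimal round-trip sketch (not run as a doctest): compress bytes
/// // into an in-memory buffer.
/// use std::io::Write;
///
/// let mut encoder = XzEncoder::new(Vec::new(), 6);
/// encoder.write_all(b"example data").unwrap();
/// let compressed: Vec<u8> = encoder.finish().unwrap();
/// ```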
pub struct XzEncoder<W: Write> {
data: Stream,
obj: Option<W>,
buf: Vec<u8>,
}
/// A compression stream which will have compressed data written to it and
/// will write uncompressed data to an output stream.
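///
/// # Example
///
/// ```ignore
/// // A minimal sketch, assuming `compressed` holds a complete xz stream.
/// use std::io::Write;
///
/// let mut decoder = XzDecoder::new(Vec::new());
/// decoder.write_all(&compressed).unwrap();
/// let original: Vec<u8> = decoder.finish().unwrap();
/// ```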
pub struct XzDecoder<W: Write> {
data: Stream,
obj: Option<W>,
buf: Vec<u8>,
}
impl<W: Write> XzEncoder<W> {
    /// Create a new compression stream which will compress at the given level
    /// and write the compressed output to the given output stream.
pub fn new(obj: W, level: u32) -> XzEncoder<W> {
let stream = Stream::new_easy_encoder(level, Check::Crc64).unwrap();
XzEncoder::new_stream(obj, stream)
}
/// Create a new encoder which will use the specified `Stream` to encode
/// (compress) data into the provided `obj`.
pub fn new_stream(obj: W, stream: Stream) -> XzEncoder<W> {
XzEncoder {
data: stream,
obj: Some(obj),
buf: Vec::with_capacity(32 * 1024),
}
}
/// Acquires a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
self.obj.as_ref().unwrap()
}
/// Acquires a mutable reference to the underlying writer.
///
/// Note that mutating the output/input state of the stream may corrupt this
/// object, so care must be taken when using this method.
pub fn get_mut(&mut self) -> &mut W {
self.obj.as_mut().unwrap()
}
fn dump(&mut self) -> io::Result<()> {
while self.buf.len() > 0 {
let n = self.obj.as_mut().unwrap().write(&self.buf)?;
self.buf.drain(..n);
}
Ok(())
}
/// Attempt to finish this output stream, writing out final chunks of data.
///
/// Note that this function can only be used once data has finished being
/// written to the output stream. After this function is called then further
/// calls to `write` may result in a panic.
///
/// # Panics
///
/// Attempts to write data to this stream may result in a panic after this
/// function is called.
pub fn try_finish(&mut self) -> io::Result<()> {
loop {
self.dump()?;
let res = self.data.process_vec(&[], &mut self.buf, Action::Finish)?;
if res == Status::StreamEnd {
break;
}
}
self.dump()
}
/// Consumes this encoder, flushing the output stream.
///
/// This will flush the underlying data stream and then return the contained
/// writer if the flush succeeded.
///
/// Note that this function may not be suitable to call in a situation where
/// the underlying stream is an asynchronous I/O stream. To finish a stream
/// the `try_finish` (or `shutdown`) method should be used instead. To
/// re-acquire ownership of a stream it is safe to call this method after
/// `try_finish` or `shutdown` has returned `Ok`.
pub fn finish(mut self) -> io::Result<W> {
self.try_finish()?;
Ok(self.obj.take().unwrap())
}
/// Returns the number of bytes produced by the compressor
///
/// Note that, due to buffering, this only bears any relation to
/// `total_in()` after a call to `flush()`. At that point,
/// `total_out() / total_in()` is the compression ratio.
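    ///
    /// ```ignore
    /// // Sketch: the ratio is only meaningful after flushing buffered data.
    /// encoder.flush().unwrap();
    /// let ratio = encoder.total_out() as f64 / encoder.total_in() as f64;
    /// ```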
pub fn total_out(&self) -> u64 {
self.data.total_out()
}
/// Returns the number of bytes consumed by the compressor
/// (e.g. the number of bytes written to this stream.)
pub fn total_in(&self) -> u64 {
self.data.total_in()
}
}
impl<W: Write> Write for XzEncoder<W> {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
loop {
self.dump()?;
let total_in = self.total_in();
self.data
.process_vec(data, &mut self.buf, Action::Run)
.unwrap();
let written = (self.total_in() - total_in) as usize;
if written > 0 || data.len() == 0 {
return Ok(written);
}
}
}
fn flush(&mut self) -> io::Result<()> {
loop {
self.dump()?;
let status = self
.data
.process_vec(&[], &mut self.buf, Action::FullFlush)
.unwrap();
if status == Status::StreamEnd {
break;
}
}
self.obj.as_mut().unwrap().flush()
}
}
#[cfg(feature = "tokio")]
impl<W: AsyncWrite> AsyncWrite for XzEncoder<W> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
try_nb!(self.try_finish());
self.get_mut().shutdown()
}
}
impl<W: Read + Write> Read for XzEncoder<W> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.get_mut().read(buf)
}
}
#[cfg(feature = "tokio")]
impl<W: AsyncRead + AsyncWrite> AsyncRead for XzEncoder<W> {}
impl<W: Write> Drop for XzEncoder<W> {
fn drop(&mut self) {
if self.obj.is_some() {
let _ = self.try_finish();
}
}
}
impl<W: Write> XzDecoder<W> {
/// Creates a new decoding stream which will decode into `obj` one xz stream
/// from the input written to it.
pub fn new(obj: W) -> XzDecoder<W> {
let stream = Stream::new_stream_decoder(u64::max_value(), 0).unwrap();
XzDecoder::new_stream(obj, stream)
}
/// Creates a new decoding stream which will decode into `obj` all the xz streams
/// from the input written to it.
pub fn new_multi_decoder(obj: W) -> XzDecoder<W> {
let stream =
Stream::new_stream_decoder(u64::max_value(), lzma_sys::LZMA_CONCATENATED).unwrap();
XzDecoder::new_stream(obj, stream)
}
/// Creates a new decoding stream which will decode all input written to it
/// into `obj`.
///
/// A custom `stream` can be specified to configure what format this decoder
/// will recognize or configure other various decoding options.
pub fn new_stream(obj: W, stream: Stream) -> XzDecoder<W> {
XzDecoder {
data: stream,
obj: Some(obj),
buf: Vec::with_capacity(32 * 1024),
}
}
/// Acquires a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
self.obj.as_ref().unwrap()
}
/// Acquires a mutable reference to the underlying writer.
///
/// Note that mutating the output/input state of the stream may corrupt this
/// object, so care must be taken when using this method.
pub fn get_mut(&mut self) -> &mut W {
self.obj.as_mut().unwrap()
}
fn dump(&mut self) -> io::Result<()> {
if self.buf.len() > 0 {
self.obj.as_mut().unwrap().write_all(&self.buf)?;
self.buf.truncate(0);
}
Ok(())
}
fn try_finish(&mut self) -> io::Result<()> {
loop {
self.dump()?;
let res = self.data.process_vec(&[], &mut self.buf, Action::Finish)?;
// When decoding a truncated file, XZ returns LZMA_BUF_ERROR and
// decodes no new data, which corresponds to this crate's MemNeeded
// status. Since we're finishing, we cannot provide more data so
// this is an error.
//
// See the 02_decompress.c example in xz-utils.
if self.buf.is_empty() && res == Status::MemNeeded {
let msg = "xz compressed stream is truncated or otherwise corrupt";
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, msg));
}
if res == Status::StreamEnd {
break;
}
}
self.dump()
}
    /// Unwrap the underlying writer, finishing the decompression stream.
pub fn finish(&mut self) -> io::Result<W> {
self.try_finish()?;
Ok(self.obj.take().unwrap())
}
/// Returns the number of bytes produced by the decompressor
///
/// Note that, due to buffering, this only bears any relation to
/// `total_in()` after a call to `flush()`. At that point,
/// `total_in() / total_out()` is the compression ratio.
pub fn total_out(&self) -> u64 {
self.data.total_out()
}
/// Returns the number of bytes consumed by the decompressor
/// (e.g. the number of bytes written to this stream.)
pub fn total_in(&self) -> u64 {
self.data.total_in()
}
}
impl<W: Write> Write for XzDecoder<W> {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
loop {
self.dump()?;
let before = self.total_in();
let res = self.data.process_vec(data, &mut self.buf, Action::Run)?;
let written = (self.total_in() - before) as usize;
if written > 0 || data.len() == 0 || res == Status::StreamEnd {
return Ok(written);
}
}
}
fn flush(&mut self) -> io::Result<()> {
self.dump()?;
self.obj.as_mut().unwrap().flush()
}
}
#[cfg(feature = "tokio")]
impl<W: AsyncWrite> AsyncWrite for XzDecoder<W> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
try_nb!(self.try_finish());
self.get_mut().shutdown()
}
}
impl<W: Read + Write> Read for XzDecoder<W> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.get_mut().read(buf)
}
}
#[cfg(feature = "tokio")]
impl<W: AsyncRead + AsyncWrite> AsyncRead for XzDecoder<W> {}
impl<W: Write> Drop for XzDecoder<W> {
fn drop(&mut self) {
if self.obj.is_some() {
let _ = self.try_finish();
}
}
}
#[cfg(test)]
mod tests {
use super::{XzDecoder, XzEncoder};
use std::io::prelude::*;
use std::iter::repeat;
#[test]
fn smoke() {
let d = XzDecoder::new(Vec::new());
let mut c = XzEncoder::new(d, 6);
c.write_all(b"12834").unwrap();
let s = repeat("12345").take(100000).collect::<String>();
c.write_all(s.as_bytes()).unwrap();
let data = c.finish().unwrap().finish().unwrap();
assert_eq!(&data[0..5], b"12834");
assert_eq!(data.len(), 500005);
assert!(format!("12834{}", s).as_bytes() == &*data);
}
#[test]
fn write_empty() {
let d = XzDecoder::new(Vec::new());
let mut c = XzEncoder::new(d, 6);
c.write_all(b"").unwrap();
let data = c.finish().unwrap().finish().unwrap();
assert_eq!(&data[..], b"");
}
#[test]
    fn qc() {
        ::quickcheck::quickcheck(test as fn(_) -> _);
        fn test(v: Vec<u8>) -> bool {
            let w = XzDecoder::new(Vec::new());
            let mut w = XzEncoder::new(w, 6);
            w.write_all(&v).unwrap();
            v == w.finish().unwrap().finish().unwrap()
        }
    }
}
|
compiler_test.go
|
package influxql_test
import (
"testing"
"github.com/influxdata/flux"
"github.com/influxdata/influxdb/v2/query/influxql"
)
func TestCompiler(t *testing.T) {
var _ flux.Compiler = (*influxql.Compiler)(nil)
}
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from glob import glob
from setuptools import setup, find_packages
import versioneer
pkg_name = 'nbsafety'
def read_file(fname):
with open(fname, 'r', encoding='utf8') as f:
return f.read()
history = read_file('HISTORY.rst')
requirements = read_file('requirements.txt').strip().split()
setup(
name=pkg_name,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='Stephen Macke',
author_email='[email protected]',
description='Fearless interactivity for Jupyter notebooks.',
long_description=read_file('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/nbsafety-project/nbsafety',
packages=find_packages(exclude=[
'binder',
'docs',
'scratchspace',
'notebooks',
'img',
'test',
'scripts',
'markdown',
'versioneer.py',
'frontend',
'blueprint.json',
]),
include_package_data=True,
data_files=[
# like `jupyter nbextension install --sys-prefix`
("share/jupyter/nbextensions/nbsafety", [
"nbsafety/resources/nbextension/index.js",
"nbsafety/resources/nbextension/index.js.map",
]),
# like `jupyter nbextension enable --sys-prefix`
("etc/jupyter/nbconfig/notebook.d", [
"nbsafety/resources/nbextension/nbsafety.json",
]),
("share/jupyter/labextensions/jupyterlab-nbsafety",
glob("nbsafety/resources/labextension/package.json")
),
("share/jupyter/labextensions/jupyterlab-nbsafety/static",
glob("nbsafety/resources/labextension/static/*")
),
# like `python -m nbsafety.install --sys-prefix`
("share/jupyter/kernels/nbsafety", [
"nbsafety/resources/kernel/kernel.json",
"nbsafety/resources/kernel/logo-32x32.png",
"nbsafety/resources/kernel/logo-64x64.png",
]),
],
install_requires=requirements,
license='BSD-3-Clause',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
# python setup.py sdist bdist_wheel --universal
# twine upload dist/*
|
sawtooth.rs
|
// Copyright 2019 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Sawtooth Compatibility Layer
//!
//! This module provides a compatibility layer for use with [Hyperledger
//! Sawtooth](https://sawtooth.hyperledger.org) transaction families. It provides adapters to
//! allow the use of existing Sawtooth transaction families, implemented with the [Rust
//! SDK](https://crates.io/crates/sawtooth-sdk), in an application built with Transact.
//!
//! Note, to use this module, the Transact library must have the `"sawtooth-compat"` feature
//! enabled.
use sawtooth_sdk::messages::processor::TpProcessRequest;
use sawtooth_sdk::messages::transaction::TransactionHeader as SawtoothTxnHeader;
use sawtooth_sdk::processor::handler::{
ApplyError as SawtoothApplyError, ContextError as SawtoothContextError,
TransactionContext as SawtoothContext, TransactionHandler as SawtoothTransactionHandler,
};
use crate::handler::{ApplyError, ContextError, TransactionContext, TransactionHandler};
use crate::protocol::transaction::{TransactionHeader, TransactionPair};
/// Adapts a Sawtooth Transaction Handler to a Transact TransactionHandler.
///
/// This adapter allows Sawtooth SDK TransactionHandler implementations to be used with the
/// Transact static execution adapters. Existing Sawtooth transaction families and smart contract
/// engines can then be compiled in to an application using Transact.
///
/// For example, the Sawtooth transaction handler for the [XO Transaction
/// Family](https://sawtooth.hyperledger.org/docs/core/releases/latest/transaction_family_specifications/xo_transaction_family.html)
/// can be adapted as follows:
///
/// ```
/// # use sawtooth_xo::handler::XoTransactionHandler;
/// # use transact::context::manager::sync::ContextManager;
/// # use transact::database::btree::BTreeDatabase;
/// # use transact::execution::adapter::static_adapter::StaticExecutionAdapter;
/// # use transact::sawtooth::SawtoothToTransactHandlerAdapter;
/// # use transact::state::merkle::{self, MerkleRadixTree, MerkleState};
/// #
/// # let db = Box::new(BTreeDatabase::new(&merkle::INDEXES));
/// # let context_manager = ContextManager::new(Box::new(MerkleState::new(db.clone())));
/// let execution_adapter = StaticExecutionAdapter::new_adapter(
/// vec![Box::new(SawtoothToTransactHandlerAdapter::new(
/// XoTransactionHandler::new(),
/// ))],
/// context_manager,
/// );
/// # let _ignore = execution_adapter;
/// ```
pub struct SawtoothToTransactHandlerAdapter<H: SawtoothTransactionHandler + Send> {
family_name: String,
family_versions: Vec<String>,
handler: H,
}
impl<H: SawtoothTransactionHandler + Send> SawtoothToTransactHandlerAdapter<H> {
/// Constructs a new Sawtooth to Transact handler adapter.
pub fn new(handler: H) -> Self {
SawtoothToTransactHandlerAdapter {
family_name: handler.family_name().clone(),
family_versions: handler.family_versions().clone(),
handler,
}
}
}
impl<H: SawtoothTransactionHandler + Send> TransactionHandler
for SawtoothToTransactHandlerAdapter<H>
{
fn family_name(&self) -> &str {
&self.family_name
}
fn family_versions(&self) -> &[String] {
&self.family_versions
}
fn apply(
&self,
transaction_pair: &TransactionPair,
context: &mut dyn TransactionContext,
) -> Result<(), ApplyError> {
let request = txn_pair_to_process_request(transaction_pair);
let mut context_adapter = TransactToSawtoothContextAdapter::new(context);
self.handler
.apply(&request, &mut context_adapter)
.map_err(|err| match err {
SawtoothApplyError::InvalidTransaction(error_message) => {
ApplyError::InvalidTransaction(error_message)
}
SawtoothApplyError::InternalError(error_message) => {
ApplyError::InternalError(error_message)
}
})
}
}
struct TransactToSawtoothContextAdapter<'a> {
    transact_context: &'a dyn TransactionContext,
}
impl<'a> TransactToSawtoothContextAdapter<'a> {
    fn new(transact_context: &'a dyn TransactionContext) -> Self {
TransactToSawtoothContextAdapter { transact_context }
}
}
impl<'a> SawtoothContext for TransactToSawtoothContextAdapter<'a> {
fn get_state_entry(&self, address: &str) -> Result<Option<Vec<u8>>, SawtoothContextError> {
let results = self
.transact_context
.get_state_entries(&[address.to_owned()])
.map_err(to_context_error)?;
// take the first item, if it exists
Ok(results.into_iter().next().map(|(_, v)| v))
}
fn get_state_entries(
&self,
addresses: &[String],
) -> Result<Vec<(String, Vec<u8>)>, SawtoothContextError> {
self.transact_context
.get_state_entries(addresses)
.map_err(to_context_error)
}
fn set_state_entry(&self, address: String, data: Vec<u8>) -> Result<(), SawtoothContextError> {
self.set_state_entries(vec![(address, data)])
}
fn set_state_entries(
&self,
entries: Vec<(String, Vec<u8>)>,
) -> Result<(), SawtoothContextError> {
self.transact_context
.set_state_entries(entries)
.map_err(to_context_error)
}
fn delete_state_entry(&self, address: &str) -> Result<Option<String>, SawtoothContextError> {
Ok(self
.delete_state_entries(&[address.to_owned()])?
.into_iter()
.next())
}
fn delete_state_entries(
&self,
addresses: &[String],
) -> Result<Vec<String>, SawtoothContextError> {
self.transact_context
.delete_state_entries(addresses)
.map_err(to_context_error)
}
fn add_receipt_data(&self, data: &[u8]) -> Result<(), SawtoothContextError> {
self.transact_context
.add_receipt_data(data.to_vec())
.map_err(to_context_error)
}
fn add_event(
&self,
event_type: String,
attributes: Vec<(String, String)>,
data: &[u8],
) -> Result<(), SawtoothContextError> {
self.transact_context
.add_event(event_type, attributes, data.to_vec())
.map_err(to_context_error)
}
}
fn txn_pair_to_process_request(transaction_pair: &TransactionPair) -> TpProcessRequest {
let mut process_request = TpProcessRequest::new();
let header = as_sawtooth_header(transaction_pair.header());
process_request.set_header(header);
let txn = transaction_pair.transaction();
process_request.set_payload(txn.payload().to_vec());
process_request.set_signature(txn.header_signature().to_owned());
process_request
}
fn as_sawtooth_header(header: &TransactionHeader) -> SawtoothTxnHeader {
let mut sawtooth_header = SawtoothTxnHeader::new();
sawtooth_header.set_family_name(header.family_name().to_owned());
sawtooth_header.set_family_version(header.family_version().to_owned());
sawtooth_header.set_signer_public_key(hex::encode(&header.signer_public_key()));
sawtooth_header.set_batcher_public_key(hex::encode(&header.batcher_public_key()));
sawtooth_header.set_dependencies(header.dependencies().iter().map(hex::encode).collect());
sawtooth_header.set_inputs(header.inputs().iter().map(hex::encode).collect());
sawtooth_header.set_outputs(header.outputs().iter().map(hex::encode).collect());
sawtooth_header.set_nonce(hex::encode(&header.nonce()));
sawtooth_header
}
fn to_context_error(err: ContextError) -> SawtoothContextError {
SawtoothContextError::ReceiveError(Box::new(err))
}
#[cfg(test)]
mod xo_compat_test {
use std::panic;
use std::sync::{Arc, Mutex};
use sawtooth_xo::handler::XoTransactionHandler;
use sha2::{Digest, Sha512};
use crate::context::manager::sync::ContextManager;
use crate::database::{btree::BTreeDatabase, Database};
use crate::execution::{adapter::static_adapter::StaticExecutionAdapter, executor::Executor};
use crate::protocol::{
batch::{BatchBuilder, BatchPair},
receipt::StateChange,
transaction::{HashMethod, TransactionBuilder},
};
use crate::scheduler::{
serial::SerialScheduler, BatchExecutionResult, Scheduler, TransactionExecutionResult,
};
use crate::signing::{hash::HashSigner, Signer};
use crate::state::merkle::{self, MerkleRadixTree, MerkleState};
use super::*;
/// Test that the compatibility handler executes a create game transaction.
///
/// #. Configure an executor with the XoTransactionHandler
/// #. Create a scheduler and add a single transaction to create an XO game
/// #. Wait until the result is returned
/// #. Verify that the result is a) valid and b) has the appropriate state changes
#[test]
fn execute_create_xo_game() {
let db = Box::new(BTreeDatabase::new(&merkle::INDEXES));
let context_manager = ContextManager::new(Box::new(MerkleState::new(db.clone())));
let executor = create_executor(&context_manager);
start_executor(&executor);
let test_executor = executor.clone();
let panic_check = panic::catch_unwind(move || {
let signer = HashSigner::new(vec![00u8, 01, 02]);
let batch_pair = create_batch(&signer, "my_game", "my_game,create,");
let state_root = initial_db_root(&*db);
let mut scheduler = SerialScheduler::new(Box::new(context_manager), state_root.clone())
.expect("Failed to create scheduler");
let (result_tx, result_rx) = std::sync::mpsc::channel();
scheduler
.set_result_callback(Box::new(move |batch_result| {
result_tx
.send(batch_result)
.expect("Unable to send batch result")
}))
.expect("Failed to set result callback");
scheduler
.add_batch(batch_pair)
.expect("Failed to add batch");
scheduler.finalize().expect("Failed to finalize scheduler");
run_schedule(&test_executor, &mut scheduler);
let batch_result = result_rx
.recv()
.expect("Unable to receive result from executor")
.expect("Should not have received None from the executor");
scheduler.shutdown();
assert_state_changes(
vec![StateChange::Set {
key: calculate_game_address("my_game"),
value: "my_game,---------,P1-NEXT,,".as_bytes().to_vec(),
}],
batch_result,
);
});
stop_executor(&executor);
assert!(panic_check.is_ok());
}
/// Test that the compatibility handler executes multiple transactions and returns the correct
/// receipts with the expected state changes.
///
/// #. Configure an executor with the XoTransactionHandler
/// #. Create a scheduler and add transactions to create an XO game and take a space
/// #. Wait until the result is returned
/// #. Verify that the result is a) valid and b) has the appropriate state changes
#[test]
fn execute_multiple_xo_transactions() {
let db = Box::new(BTreeDatabase::new(&merkle::INDEXES));
let context_manager = ContextManager::new(Box::new(MerkleState::new(db.clone())));
let executor = create_executor(&context_manager);
start_executor(&executor);
let test_executor = executor.clone();
let panic_check = panic::catch_unwind(move || {
let signer = HashSigner::new(vec![00u8, 01, 02]);
let create_batch_pair = create_batch(&signer, "my_game", "my_game,create,");
let take_batch_pair = create_batch(&signer, "my_game", "my_game,take,1");
let state_root = initial_db_root(&*db);
let mut scheduler = SerialScheduler::new(Box::new(context_manager), state_root.clone())
.expect("Failed to create scheduler");
let (result_tx, result_rx) = std::sync::mpsc::channel();
scheduler
.set_result_callback(Box::new(move |batch_result| {
result_tx
.send(batch_result)
.expect("Unable to send batch result")
}))
.expect("Failed to set result callback");
scheduler
.add_batch(create_batch_pair)
.expect("Failed to add 1st batch");
scheduler
.add_batch(take_batch_pair)
.expect("Failed to add 2nd batch");
scheduler.finalize().expect("Failed to finalize scheduler");
run_schedule(&test_executor, &mut scheduler);
let create_batch_result = result_rx
.recv()
.expect("Unable to receive result from executor")
.expect("Should not have received None from the executor");
let take_batch_result = result_rx
.recv()
.expect("Unable to receive result from executor")
.expect("Should not have received None from the executor");
scheduler.shutdown();
assert_state_changes(
vec![StateChange::Set {
key: calculate_game_address("my_game"),
value: "my_game,---------,P1-NEXT,,".as_bytes().to_vec(),
}],
create_batch_result,
);
assert_state_changes(
vec![StateChange::Set {
key: calculate_game_address("my_game"),
value: format!(
"my_game,X--------,P2-NEXT,{},",
hex::encode(signer.public_key())
)
.into_bytes(),
}],
take_batch_result,
);
});
stop_executor(&executor);
assert!(panic_check.is_ok());
}
fn assert_state_changes(
expected_state_changes: Vec<StateChange>,
batch_result: BatchExecutionResult,
) {
assert_eq!(1, batch_result.results.len());
let mut batch_result = batch_result;
let txn_result = batch_result
.results
.pop()
.expect("Length 1, but no first element");
let receipt = match txn_result {
TransactionExecutionResult::Valid(receipt) => receipt,
TransactionExecutionResult::Invalid(invalid_result) => {
panic!("Transaction failed: {:?}", invalid_result)
}
};
assert_eq!(expected_state_changes, receipt.state_changes);
}
fn create_batch(signer: &Signer, game_name: &str, payload: &str) -> BatchPair {
let game_address = calculate_game_address(game_name);
let txn_pair = TransactionBuilder::new()
.with_batcher_public_key(signer.public_key().to_vec())
.with_family_name("xo".to_string())
.with_family_version("1.0".to_string())
.with_inputs(vec![hex::decode(&game_address).unwrap()])
.with_nonce("test_nonce".as_bytes().to_vec())
.with_outputs(vec![hex::decode(&game_address).unwrap()])
.with_payload_hash_method(HashMethod::SHA512)
.with_payload(payload.as_bytes().to_vec())
.build_pair(signer)
.expect("The TransactionBuilder was not given the correct items");
BatchBuilder::new()
.with_transactions(vec![txn_pair.take().0])
.build_pair(signer)
.expect("Unable to build batch a pair")
}
fn create_executor(context_manager: &ContextManager) -> Arc<Mutex<Option<Executor>>> {
Arc::new(Mutex::new(Some(Executor::new(vec![Box::new(
StaticExecutionAdapter::new_adapter(
vec![Box::new(SawtoothToTransactHandlerAdapter::new(
                    XoTransactionHandler::new(),
                ))],
                context_manager.clone(),
)
.expect("Unable to create static execution adapter"),
)]))))
}
fn start_executor(executor: &Arc<Mutex<Option<Executor>>>) {
executor
.lock()
.expect("Should not have poisoned the lock")
.as_mut()
.expect("Should not be None")
.start()
.expect("Start should not have failed");
}
fn run_schedule(executor: &Arc<Mutex<Option<Executor>>>, scheduler: &mut dyn Scheduler) {
let task_iterator = scheduler
.take_task_iterator()
.expect("Failed to take task iterator");
executor
.lock()
.expect("Should not have poisoned the lock")
.as_ref()
.expect("Should not be None")
.execute(
task_iterator,
scheduler.new_notifier().expect("Failed to get notifier"),
)
.expect("Failed to execute schedule");
}
fn stop_executor(executor: &Arc<Mutex<Option<Executor>>>) {
let stoppable = executor
.lock()
.expect("Should not have poisoned the lock")
.take()
.expect("Should not be None");
stoppable.stop();
}
fn calculate_game_address<S: AsRef<[u8]>>(name: S) -> String {
let mut sha = Sha512::default();
sha.input(name);
"5b7349".to_owned() + &hex::encode(&sha.result())[..64]
}
fn initial_db_root(db: &dyn Database) -> String {
let merkle_db =
MerkleRadixTree::new(db.clone_box(), None).expect("Cannot initialize merkle database");
merkle_db.get_merkle_root()
}
}
|
select-org.component.ts
|
import { first } from 'rxjs/operators';
import { Component, OnInit, AfterViewInit } from '@angular/core';
import { FormService } from '@sunbird/core';
import { ActivatedRoute } from '@angular/router';
import { TenantService } from '@sunbird/core';
import { ResourceService, NavigationHelperService } from '@sunbird/shared';
import { get } from 'lodash-es';
@Component({
templateUrl: './select-org.component.html',
styleUrls: ['./select-org.component.scss']
})
export class SelectOrgComponent implements OnInit, AfterViewInit {
public selectedOrg: any;
public orgList: Array<any>;
public errorUrl = '/sso/sign-in/error';
public telemetryImpression;
public tenantInfo: any = {};
public disableSubmitBtn = true;
public submitOrgInteractEdata;
constructor(private formService: FormService, public activatedRoute: ActivatedRoute, private tenantService: TenantService,
public resourceService: ResourceService, public navigationhelperService: NavigationHelperService) { }
ngOnInit() {
this.setTenantInfo();
this.setTelemetryData();
this.setRedirectUriCookie();
this.getSsoOrgList().subscribe(formData => this.orgList = formData,
error => console.log('no org configured in form')); // show toaster message
}
private setTenantInfo() {
this.tenantService.tenantData$.pipe(first()).subscribe(data => {
if (!data.err) {
this.tenantInfo = {
logo: data.tenantData.logo,
tenantName: data.tenantData.titleName
};
}
});
}
public handleOrgChange(event) {
this.disableSubmitBtn = false;
}
private getSsoOrgList() {
const formServiceInputParams = {
formType: 'organization',
formAction: 'sign-in',
contentType: 'organization'
};
return this.formService.getFormConfig(formServiceInputParams);
}
public handleOrgSelection(event) {
window.location.href = this.selectedOrg;
}
ngAfterViewInit () {
setTimeout(() => {
this.telemetryImpression = {
context: {
env: this.activatedRoute.snapshot.data.telemetry.env,
},
edata: {
type: this.activatedRoute.snapshot.data.telemetry.type,
pageid: this.activatedRoute.snapshot.data.telemetry.pageid,
uri: this.activatedRoute.snapshot.data.telemetry.uri,
duration: this.navigationhelperService.getPageLoadTime()
}
};
});
}
private setTelemetryData() {
this.submitOrgInteractEdata = {
id: 'submit-org',
type: 'click',
pageid: 'sso-sign-in',
};
}
private setRedirectUriCookie() {
const redirectUri = get(this.activatedRoute, 'snapshot.queryParams.redirect_uri');
if (redirectUri) { document.cookie = `SSO_REDIRECT_URI=${redirectUri}; path=/`; }
}
}
|
zz_generated.deepcopy.go
|
// +build !ignore_autogenerated
/*
Copyright 2021 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"github.com/openkruise/kruise/apis/apps/pub"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdaptiveWorkloadSpreadStrategy) DeepCopyInto(out *AdaptiveWorkloadSpreadStrategy) {
*out = *in
if in.RescheduleCriticalSeconds != nil {
in, out := &in.RescheduleCriticalSeconds, &out.RescheduleCriticalSeconds
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdaptiveWorkloadSpreadStrategy.
func (in *AdaptiveWorkloadSpreadStrategy) DeepCopy() *AdaptiveWorkloadSpreadStrategy {
if in == nil {
return nil
}
out := new(AdaptiveWorkloadSpreadStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdvancedCronJob) DeepCopyInto(out *AdvancedCronJob) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedCronJob.
func (in *AdvancedCronJob) DeepCopy() *AdvancedCronJob {
if in == nil {
return nil
}
out := new(AdvancedCronJob)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AdvancedCronJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdvancedCronJobList) DeepCopyInto(out *AdvancedCronJobList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]AdvancedCronJob, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedCronJobList.
func (in *AdvancedCronJobList) DeepCopy() *AdvancedCronJobList {
if in == nil {
return nil
}
out := new(AdvancedCronJobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AdvancedCronJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdvancedCronJobSpec) DeepCopyInto(out *AdvancedCronJobSpec) {
*out = *in
if in.StartingDeadlineSeconds != nil {
in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds
*out = new(int64)
**out = **in
}
if in.Paused != nil {
in, out := &in.Paused, &out.Paused
*out = new(bool)
**out = **in
}
if in.SuccessfulJobsHistoryLimit != nil {
in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
*out = new(int32)
**out = **in
}
if in.FailedJobsHistoryLimit != nil {
in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
*out = new(int32)
**out = **in
}
in.Template.DeepCopyInto(&out.Template)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedCronJobSpec.
func (in *AdvancedCronJobSpec) DeepCopy() *AdvancedCronJobSpec {
if in == nil {
return nil
}
out := new(AdvancedCronJobSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdvancedCronJobStatus) DeepCopyInto(out *AdvancedCronJobStatus) {
*out = *in
if in.Active != nil {
in, out := &in.Active, &out.Active
*out = make([]v1.ObjectReference, len(*in))
copy(*out, *in)
}
if in.LastScheduleTime != nil {
in, out := &in.LastScheduleTime, &out.LastScheduleTime
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedCronJobStatus.
func (in *AdvancedCronJobStatus) DeepCopy() *AdvancedCronJobStatus {
if in == nil {
return nil
}
out := new(AdvancedCronJobStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdvancedStatefulSetTemplateSpec) DeepCopyInto(out *AdvancedStatefulSetTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedStatefulSetTemplateSpec.
func (in *AdvancedStatefulSetTemplateSpec) DeepCopy() *AdvancedStatefulSetTemplateSpec {
if in == nil {
return nil
}
out := new(AdvancedStatefulSetTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BroadcastJob) DeepCopyInto(out *BroadcastJob) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BroadcastJob.
func (in *BroadcastJob) DeepCopy() *BroadcastJob {
if in == nil {
return nil
}
out := new(BroadcastJob)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BroadcastJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BroadcastJobList) DeepCopyInto(out *BroadcastJobList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]BroadcastJob, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BroadcastJobList.
func (in *BroadcastJobList) DeepCopy() *BroadcastJobList {
if in == nil {
return nil
}
out := new(BroadcastJobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BroadcastJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BroadcastJobSpec) DeepCopyInto(out *BroadcastJobSpec) {
*out = *in
if in.Parallelism != nil {
in, out := &in.Parallelism, &out.Parallelism
*out = new(intstr.IntOrString)
**out = **in
}
in.Template.DeepCopyInto(&out.Template)
in.CompletionPolicy.DeepCopyInto(&out.CompletionPolicy)
out.FailurePolicy = in.FailurePolicy
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BroadcastJobSpec.
func (in *BroadcastJobSpec) DeepCopy() *BroadcastJobSpec {
if in == nil {
return nil
}
out := new(BroadcastJobSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BroadcastJobStatus) DeepCopyInto(out *BroadcastJobStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]JobCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.StartTime != nil {
in, out := &in.StartTime, &out.StartTime
*out = (*in).DeepCopy()
}
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BroadcastJobStatus.
func (in *BroadcastJobStatus) DeepCopy() *BroadcastJobStatus {
if in == nil {
return nil
}
out := new(BroadcastJobStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BroadcastJobTemplateSpec) DeepCopyInto(out *BroadcastJobTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BroadcastJobTemplateSpec.
func (in *BroadcastJobTemplateSpec) DeepCopy() *BroadcastJobTemplateSpec {
if in == nil {
return nil
}
out := new(BroadcastJobTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneSet) DeepCopyInto(out *CloneSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneSet.
func (in *CloneSet) DeepCopy() *CloneSet {
if in == nil {
return nil
}
out := new(CloneSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CloneSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneSetCondition) DeepCopyInto(out *CloneSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneSetCondition.
func (in *CloneSetCondition) DeepCopy() *CloneSetCondition {
if in == nil {
return nil
}
out := new(CloneSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneSetList) DeepCopyInto(out *CloneSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CloneSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneSetList.
func (in *CloneSetList) DeepCopy() *CloneSetList {
if in == nil {
return nil
}
out := new(CloneSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CloneSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneSetScaleStrategy) DeepCopyInto(out *CloneSetScaleStrategy) {
*out = *in
if in.PodsToDelete != nil {
in, out := &in.PodsToDelete, &out.PodsToDelete
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneSetScaleStrategy.
func (in *CloneSetScaleStrategy) DeepCopy() *CloneSetScaleStrategy {
if in == nil {
return nil
}
out := new(CloneSetScaleStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneSetSpec) DeepCopyInto(out *CloneSetSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
if in.VolumeClaimTemplates != nil {
in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
*out = make([]v1.PersistentVolumeClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.ScaleStrategy.DeepCopyInto(&out.ScaleStrategy)
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
if in.Lifecycle != nil {
in, out := &in.Lifecycle, &out.Lifecycle
*out = new(pub.Lifecycle)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneSetSpec.
func (in *CloneSetSpec) DeepCopy() *CloneSetSpec {
if in == nil {
return nil
}
out := new(CloneSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneSetStatus) DeepCopyInto(out *CloneSetStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]CloneSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneSetStatus.
func (in *CloneSetStatus) DeepCopy() *CloneSetStatus {
if in == nil {
return nil
}
out := new(CloneSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneSetTemplateSpec) DeepCopyInto(out *CloneSetTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneSetTemplateSpec.
func (in *CloneSetTemplateSpec) DeepCopy() *CloneSetTemplateSpec {
if in == nil {
return nil
}
out := new(CloneSetTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneSetUpdateStrategy) DeepCopyInto(out *CloneSetUpdateStrategy) {
*out = *in
if in.Partition != nil {
in, out := &in.Partition, &out.Partition
*out = new(intstr.IntOrString)
**out = **in
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.MaxSurge != nil {
in, out := &in.MaxSurge, &out.MaxSurge
*out = new(intstr.IntOrString)
**out = **in
}
if in.PriorityStrategy != nil {
in, out := &in.PriorityStrategy, &out.PriorityStrategy
*out = new(pub.UpdatePriorityStrategy)
(*in).DeepCopyInto(*out)
}
if in.ScatterStrategy != nil {
in, out := &in.ScatterStrategy, &out.ScatterStrategy
*out = make(UpdateScatterStrategy, len(*in))
copy(*out, *in)
}
if in.InPlaceUpdateStrategy != nil {
in, out := &in.InPlaceUpdateStrategy, &out.InPlaceUpdateStrategy
*out = new(pub.InPlaceUpdateStrategy)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneSetUpdateStrategy.
func (in *CloneSetUpdateStrategy) DeepCopy() *CloneSetUpdateStrategy {
if in == nil {
return nil
}
out := new(CloneSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CompletionPolicy) DeepCopyInto(out *CompletionPolicy) {
*out = *in
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
*out = new(int64)
**out = **in
}
if in.TTLSecondsAfterFinished != nil {
in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompletionPolicy.
func (in *CompletionPolicy) DeepCopy() *CompletionPolicy {
if in == nil {
return nil
}
out := new(CompletionPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequest) DeepCopyInto(out *ContainerRecreateRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequest.
func (in *ContainerRecreateRequest) DeepCopy() *ContainerRecreateRequest {
if in == nil {
return nil
}
out := new(ContainerRecreateRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ContainerRecreateRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequestContainer) DeepCopyInto(out *ContainerRecreateRequestContainer) {
*out = *in
if in.PreStop != nil {
in, out := &in.PreStop, &out.PreStop
*out = new(v1.Handler)
(*in).DeepCopyInto(*out)
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]v1.ContainerPort, len(*in))
copy(*out, *in)
}
if in.StatusContext != nil {
in, out := &in.StatusContext, &out.StatusContext
*out = new(ContainerRecreateRequestContainerContext)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequestContainer.
func (in *ContainerRecreateRequestContainer) DeepCopy() *ContainerRecreateRequestContainer {
if in == nil {
return nil
}
out := new(ContainerRecreateRequestContainer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequestContainerContext) DeepCopyInto(out *ContainerRecreateRequestContainerContext) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequestContainerContext.
func (in *ContainerRecreateRequestContainerContext) DeepCopy() *ContainerRecreateRequestContainerContext {
if in == nil {
return nil
}
out := new(ContainerRecreateRequestContainerContext)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequestContainerRecreateState) DeepCopyInto(out *ContainerRecreateRequestContainerRecreateState) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequestContainerRecreateState.
func (in *ContainerRecreateRequestContainerRecreateState) DeepCopy() *ContainerRecreateRequestContainerRecreateState {
if in == nil {
return nil
}
out := new(ContainerRecreateRequestContainerRecreateState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequestList) DeepCopyInto(out *ContainerRecreateRequestList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ContainerRecreateRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequestList.
func (in *ContainerRecreateRequestList) DeepCopy() *ContainerRecreateRequestList {
if in == nil {
return nil
}
out := new(ContainerRecreateRequestList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ContainerRecreateRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequestSpec) DeepCopyInto(out *ContainerRecreateRequestSpec) {
*out = *in
if in.Containers != nil {
in, out := &in.Containers, &out.Containers
*out = make([]ContainerRecreateRequestContainer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Strategy != nil {
in, out := &in.Strategy, &out.Strategy
*out = new(ContainerRecreateRequestStrategy)
(*in).DeepCopyInto(*out)
}
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
*out = new(int64)
**out = **in
}
if in.TTLSecondsAfterFinished != nil {
in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequestSpec.
func (in *ContainerRecreateRequestSpec) DeepCopy() *ContainerRecreateRequestSpec {
if in == nil {
return nil
}
out := new(ContainerRecreateRequestSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequestStatus) DeepCopyInto(out *ContainerRecreateRequestStatus) {
*out = *in
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
if in.ContainerRecreateStates != nil {
in, out := &in.ContainerRecreateStates, &out.ContainerRecreateStates
*out = make([]ContainerRecreateRequestContainerRecreateState, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequestStatus.
func (in *ContainerRecreateRequestStatus) DeepCopy() *ContainerRecreateRequestStatus {
if in == nil {
return nil
}
out := new(ContainerRecreateRequestStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequestStrategy) DeepCopyInto(out *ContainerRecreateRequestStrategy) {
*out = *in
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
**out = **in
}
if in.UnreadyGracePeriodSeconds != nil {
in, out := &in.UnreadyGracePeriodSeconds, &out.UnreadyGracePeriodSeconds
*out = new(int64)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequestStrategy.
func (in *ContainerRecreateRequestStrategy) DeepCopy() *ContainerRecreateRequestStrategy {
if in == nil {
return nil
}
out := new(ContainerRecreateRequestStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerRecreateRequestSyncContainerStatus) DeepCopyInto(out *ContainerRecreateRequestSyncContainerStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRecreateRequestSyncContainerStatus.
func (in *ContainerRecreateRequestSyncContainerStatus) DeepCopy() *ContainerRecreateRequestSyncContainerStatus {
if in == nil {
return nil
}
out := new(ContainerRecreateRequestSyncContainerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobTemplate) DeepCopyInto(out *CronJobTemplate) {
*out = *in
if in.JobTemplate != nil {
in, out := &in.JobTemplate, &out.JobTemplate
*out = new(v1beta1.JobTemplateSpec)
(*in).DeepCopyInto(*out)
}
if in.BroadcastJobTemplate != nil {
in, out := &in.BroadcastJobTemplate, &out.BroadcastJobTemplate
*out = new(BroadcastJobTemplateSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobTemplate.
func (in *CronJobTemplate) DeepCopy() *CronJobTemplate {
if in == nil {
return nil
}
out := new(CronJobTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
func (in *DaemonSet) DeepCopy() *DaemonSet {
if in == nil {
return nil
}
out := new(DaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
if in == nil {
return nil
}
out := new(DaemonSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DaemonSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
func (in *DaemonSetList) DeepCopy() *DaemonSetList {
if in == nil {
return nil
}
out := new(DaemonSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.BurstReplicas != nil {
in, out := &in.BurstReplicas, &out.BurstReplicas
*out = new(intstr.IntOrString)
**out = **in
}
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
if in == nil {
return nil
}
out := new(DaemonSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DaemonSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
if in == nil {
return nil
}
out := new(DaemonSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateDaemonSet)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
if in == nil {
return nil
}
out := new(DaemonSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentTemplateSpec) DeepCopyInto(out *DeploymentTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTemplateSpec.
func (in *DeploymentTemplateSpec) DeepCopy() *DeploymentTemplateSpec {
if in == nil {
return nil
}
out := new(DeploymentTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailurePolicy) DeepCopyInto(out *FailurePolicy) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailurePolicy.
func (in *FailurePolicy) DeepCopy() *FailurePolicy {
if in == nil {
return nil
}
out := new(FailurePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullJob) DeepCopyInto(out *ImagePullJob) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullJob.
func (in *ImagePullJob) DeepCopy() *ImagePullJob {
if in == nil {
return nil
}
out := new(ImagePullJob)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImagePullJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullJobList) DeepCopyInto(out *ImagePullJobList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ImagePullJob, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullJobList.
func (in *ImagePullJobList) DeepCopy() *ImagePullJobList {
if in == nil {
return nil
}
out := new(ImagePullJobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImagePullJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullJobNodeSelector) DeepCopyInto(out *ImagePullJobNodeSelector) {
*out = *in
if in.Names != nil {
in, out := &in.Names, &out.Names
*out = make([]string, len(*in))
copy(*out, *in)
}
in.LabelSelector.DeepCopyInto(&out.LabelSelector)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullJobNodeSelector.
func (in *ImagePullJobNodeSelector) DeepCopy() *ImagePullJobNodeSelector {
if in == nil {
return nil
}
out := new(ImagePullJobNodeSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullJobPodSelector) DeepCopyInto(out *ImagePullJobPodSelector) {
*out = *in
in.LabelSelector.DeepCopyInto(&out.LabelSelector)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullJobPodSelector.
func (in *ImagePullJobPodSelector) DeepCopy() *ImagePullJobPodSelector {
if in == nil {
return nil
}
out := new(ImagePullJobPodSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullJobSpec) DeepCopyInto(out *ImagePullJobSpec) {
*out = *in
if in.PullSecrets != nil {
in, out := &in.PullSecrets, &out.PullSecrets
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(ImagePullJobNodeSelector)
(*in).DeepCopyInto(*out)
}
if in.PodSelector != nil {
in, out := &in.PodSelector, &out.PodSelector
*out = new(ImagePullJobPodSelector)
(*in).DeepCopyInto(*out)
}
if in.Parallelism != nil {
in, out := &in.Parallelism, &out.Parallelism
*out = new(intstr.IntOrString)
**out = **in
}
if in.PullPolicy != nil {
in, out := &in.PullPolicy, &out.PullPolicy
*out = new(PullPolicy)
(*in).DeepCopyInto(*out)
}
in.CompletionPolicy.DeepCopyInto(&out.CompletionPolicy)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullJobSpec.
func (in *ImagePullJobSpec) DeepCopy() *ImagePullJobSpec {
if in == nil {
return nil
}
out := new(ImagePullJobSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImagePullJobStatus) DeepCopyInto(out *ImagePullJobStatus) {
*out = *in
if in.StartTime != nil {
in, out := &in.StartTime, &out.StartTime
*out = (*in).DeepCopy()
}
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
if in.FailedNodes != nil {
in, out := &in.FailedNodes, &out.FailedNodes
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullJobStatus.
func (in *ImagePullJobStatus) DeepCopy() *ImagePullJobStatus {
if in == nil {
return nil
}
out := new(ImagePullJobStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
*out = *in
if in.PullSecrets != nil {
in, out := &in.PullSecrets, &out.PullSecrets
*out = make([]ReferenceObject, len(*in))
copy(*out, *in)
}
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]ImageTagSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
func (in *ImageSpec) DeepCopy() *ImageSpec {
if in == nil {
return nil
}
out := new(ImageSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStatus) DeepCopyInto(out *ImageStatus) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]ImageTagStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus.
func (in *ImageStatus) DeepCopy() *ImageStatus {
if in == nil {
return nil
}
out := new(ImageStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageTagPullPolicy) DeepCopyInto(out *ImageTagPullPolicy) {
*out = *in
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int32)
**out = **in
}
if in.BackoffLimit != nil {
in, out := &in.BackoffLimit, &out.BackoffLimit
*out = new(int32)
**out = **in
}
if in.TTLSecondsAfterFinished != nil {
in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
*out = new(int32)
**out = **in
}
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
*out = new(int64)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagPullPolicy.
func (in *ImageTagPullPolicy) DeepCopy() *ImageTagPullPolicy {
if in == nil {
return nil
}
out := new(ImageTagPullPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageTagSpec) DeepCopyInto(out *ImageTagSpec) {
*out = *in
if in.CreatedAt != nil {
in, out := &in.CreatedAt, &out.CreatedAt
*out = (*in).DeepCopy()
}
if in.PullPolicy != nil {
in, out := &in.PullPolicy, &out.PullPolicy
*out = new(ImageTagPullPolicy)
(*in).DeepCopyInto(*out)
}
if in.OwnerReferences != nil {
in, out := &in.OwnerReferences, &out.OwnerReferences
*out = make([]v1.ObjectReference, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagSpec.
func (in *ImageTagSpec) DeepCopy() *ImageTagSpec {
if in == nil {
return nil
}
out := new(ImageTagSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageTagStatus) DeepCopyInto(out *ImageTagStatus) {
*out = *in
if in.StartTime != nil {
in, out := &in.StartTime, &out.StartTime
*out = (*in).DeepCopy()
}
if in.CompletionTime != nil {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagStatus.
func (in *ImageTagStatus) DeepCopy() *ImageTagStatus {
if in == nil {
return nil
}
out := new(ImageTagStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobCondition) DeepCopyInto(out *JobCondition) {
*out = *in
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobCondition.
func (in *JobCondition) DeepCopy() *JobCondition {
if in == nil {
return nil
}
out := new(JobCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManualUpdate) DeepCopyInto(out *ManualUpdate) {
*out = *in
if in.Partitions != nil {
in, out := &in.Partitions, &out.Partitions
*out = make(map[string]int32, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManualUpdate.
func (in *ManualUpdate) DeepCopy() *ManualUpdate {
if in == nil {
return nil
}
out := new(ManualUpdate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeImage) DeepCopyInto(out *NodeImage) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeImage.
func (in *NodeImage) DeepCopy() *NodeImage {
if in == nil {
return nil
}
out := new(NodeImage)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeImage) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeImageList) DeepCopyInto(out *NodeImageList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]NodeImage, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeImageList.
func (in *NodeImageList) DeepCopy() *NodeImageList {
if in == nil {
return nil
}
out := new(NodeImageList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeImageList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeImageSpec) DeepCopyInto(out *NodeImageSpec) {
*out = *in
if in.Images != nil {
in, out := &in.Images, &out.Images
*out = make(map[string]ImageSpec, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeImageSpec.
func (in *NodeImageSpec) DeepCopy() *NodeImageSpec {
if in == nil {
return nil
}
out := new(NodeImageSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeImageStatus) DeepCopyInto(out *NodeImageStatus) {
*out = *in
if in.ImageStatuses != nil {
in, out := &in.ImageStatuses, &out.ImageStatuses
*out = make(map[string]ImageStatus, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.FirstSyncStatus != nil {
in, out := &in.FirstSyncStatus, &out.FirstSyncStatus
*out = new(SyncStatus)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeImageStatus.
func (in *NodeImageStatus) DeepCopy() *NodeImageStatus {
if in == nil {
return nil
}
out := new(NodeImageStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PullPolicy) DeepCopyInto(out *PullPolicy) {
*out = *in
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int32)
**out = **in
}
|
if in.BackoffLimit != nil {
in, out := &in.BackoffLimit, &out.BackoffLimit
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullPolicy.
func (in *PullPolicy) DeepCopy() *PullPolicy {
if in == nil {
return nil
}
out := new(PullPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReferenceObject) DeepCopyInto(out *ReferenceObject) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceObject.
func (in *ReferenceObject) DeepCopy() *ReferenceObject {
if in == nil {
return nil
}
out := new(ReferenceObject)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Partition != nil {
in, out := &in.Partition, &out.Partition
*out = new(int32)
**out = **in
}
if in.Paused != nil {
in, out := &in.Paused, &out.Paused
*out = new(bool)
**out = **in
}
if in.MaxSurge != nil {
in, out := &in.MaxSurge, &out.MaxSurge
*out = new(intstr.IntOrString)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
if in == nil {
return nil
}
out := new(RollingUpdateDaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) {
*out = *in
if in.Partition != nil {
in, out := &in.Partition, &out.Partition
*out = new(int32)
**out = **in
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.UnorderedUpdate != nil {
in, out := &in.UnorderedUpdate, &out.UnorderedUpdate
*out = new(UnorderedUpdateStrategy)
(*in).DeepCopyInto(*out)
}
if in.InPlaceUpdateStrategy != nil {
in, out := &in.InPlaceUpdateStrategy, &out.InPlaceUpdateStrategy
*out = new(pub.InPlaceUpdateStrategy)
**out = **in
}
if in.MinReadySeconds != nil {
in, out := &in.MinReadySeconds, &out.MinReadySeconds
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy.
func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy {
if in == nil {
return nil
}
out := new(RollingUpdateStatefulSetStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ShareVolumePolicy) DeepCopyInto(out *ShareVolumePolicy) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShareVolumePolicy.
func (in *ShareVolumePolicy) DeepCopy() *ShareVolumePolicy {
if in == nil {
return nil
}
out := new(ShareVolumePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SidecarContainer) DeepCopyInto(out *SidecarContainer) {
*out = *in
in.Container.DeepCopyInto(&out.Container)
out.UpgradeStrategy = in.UpgradeStrategy
out.ShareVolumePolicy = in.ShareVolumePolicy
if in.TransferEnv != nil {
in, out := &in.TransferEnv, &out.TransferEnv
*out = make([]TransferEnvVar, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarContainer.
func (in *SidecarContainer) DeepCopy() *SidecarContainer {
if in == nil {
return nil
}
out := new(SidecarContainer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SidecarContainerUpgradeStrategy) DeepCopyInto(out *SidecarContainerUpgradeStrategy) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarContainerUpgradeStrategy.
func (in *SidecarContainerUpgradeStrategy) DeepCopy() *SidecarContainerUpgradeStrategy {
if in == nil {
return nil
}
out := new(SidecarContainerUpgradeStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SidecarSet) DeepCopyInto(out *SidecarSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarSet.
func (in *SidecarSet) DeepCopy() *SidecarSet {
if in == nil {
return nil
}
out := new(SidecarSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SidecarSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SidecarSetList) DeepCopyInto(out *SidecarSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]SidecarSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarSetList.
func (in *SidecarSetList) DeepCopy() *SidecarSetList {
if in == nil {
return nil
}
out := new(SidecarSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SidecarSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SidecarSetSpec) DeepCopyInto(out *SidecarSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.InitContainers != nil {
in, out := &in.InitContainers, &out.InitContainers
*out = make([]SidecarContainer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Containers != nil {
in, out := &in.Containers, &out.Containers
*out = make([]SidecarContainer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]v1.LocalObjectReference, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarSetSpec.
func (in *SidecarSetSpec) DeepCopy() *SidecarSetSpec {
if in == nil {
return nil
}
out := new(SidecarSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SidecarSetStatus) DeepCopyInto(out *SidecarSetStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarSetStatus.
func (in *SidecarSetStatus) DeepCopy() *SidecarSetStatus {
if in == nil {
return nil
}
out := new(SidecarSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SidecarSetUpdateStrategy) DeepCopyInto(out *SidecarSetUpdateStrategy) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Partition != nil {
in, out := &in.Partition, &out.Partition
*out = new(intstr.IntOrString)
**out = **in
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.ScatterStrategy != nil {
in, out := &in.ScatterStrategy, &out.ScatterStrategy
*out = make(UpdateScatterStrategy, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarSetUpdateStrategy.
func (in *SidecarSetUpdateStrategy) DeepCopy() *SidecarSetUpdateStrategy {
if in == nil {
return nil
}
out := new(SidecarSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSet) DeepCopyInto(out *StatefulSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet.
func (in *StatefulSet) DeepCopy() *StatefulSet {
if in == nil {
return nil
}
out := new(StatefulSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StatefulSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]StatefulSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList.
func (in *StatefulSetList) DeepCopy() *StatefulSetList {
if in == nil {
return nil
}
out := new(StatefulSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *StatefulSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
if in.VolumeClaimTemplates != nil {
in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates
*out = make([]v1.PersistentVolumeClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec.
func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec {
if in == nil {
return nil
}
out := new(StatefulSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]appsv1.StatefulSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus.
func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus {
if in == nil {
return nil
}
out := new(StatefulSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetTemplateSpec) DeepCopyInto(out *StatefulSetTemplateSpec) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetTemplateSpec.
func (in *StatefulSetTemplateSpec) DeepCopy() *StatefulSetTemplateSpec {
if in == nil {
return nil
}
out := new(StatefulSetTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdateStatefulSetStrategy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy.
func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
if in == nil {
return nil
}
out := new(StatefulSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Subset) DeepCopyInto(out *Subset) {
*out = *in
in.NodeSelectorTerm.DeepCopyInto(&out.NodeSelectorTerm)
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(intstr.IntOrString)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subset.
func (in *Subset) DeepCopy() *Subset {
if in == nil {
return nil
}
out := new(Subset)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubsetTemplate) DeepCopyInto(out *SubsetTemplate) {
*out = *in
if in.StatefulSetTemplate != nil {
in, out := &in.StatefulSetTemplate, &out.StatefulSetTemplate
*out = new(StatefulSetTemplateSpec)
(*in).DeepCopyInto(*out)
}
if in.AdvancedStatefulSetTemplate != nil {
in, out := &in.AdvancedStatefulSetTemplate, &out.AdvancedStatefulSetTemplate
*out = new(AdvancedStatefulSetTemplateSpec)
(*in).DeepCopyInto(*out)
}
if in.CloneSetTemplate != nil {
in, out := &in.CloneSetTemplate, &out.CloneSetTemplate
*out = new(CloneSetTemplateSpec)
(*in).DeepCopyInto(*out)
}
if in.DeploymentTemplate != nil {
in, out := &in.DeploymentTemplate, &out.DeploymentTemplate
*out = new(DeploymentTemplateSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubsetTemplate.
func (in *SubsetTemplate) DeepCopy() *SubsetTemplate {
if in == nil {
return nil
}
out := new(SubsetTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncStatus) DeepCopyInto(out *SyncStatus) {
*out = *in
in.SyncAt.DeepCopyInto(&out.SyncAt)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncStatus.
func (in *SyncStatus) DeepCopy() *SyncStatus {
if in == nil {
return nil
}
out := new(SyncStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetReference) DeepCopyInto(out *TargetReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetReference.
func (in *TargetReference) DeepCopy() *TargetReference {
if in == nil {
return nil
}
out := new(TargetReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Topology) DeepCopyInto(out *Topology) {
*out = *in
if in.Subsets != nil {
in, out := &in.Subsets, &out.Subsets
*out = make([]Subset, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology.
func (in *Topology) DeepCopy() *Topology {
if in == nil {
return nil
}
out := new(Topology)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TransferEnvVar) DeepCopyInto(out *TransferEnvVar) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferEnvVar.
func (in *TransferEnvVar) DeepCopy() *TransferEnvVar {
if in == nil {
return nil
}
out := new(TransferEnvVar)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UnitedDeployment) DeepCopyInto(out *UnitedDeployment) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeployment.
func (in *UnitedDeployment) DeepCopy() *UnitedDeployment {
if in == nil {
return nil
}
out := new(UnitedDeployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *UnitedDeployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UnitedDeploymentCondition) DeepCopyInto(out *UnitedDeploymentCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeploymentCondition.
func (in *UnitedDeploymentCondition) DeepCopy() *UnitedDeploymentCondition {
if in == nil {
return nil
}
out := new(UnitedDeploymentCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UnitedDeploymentList) DeepCopyInto(out *UnitedDeploymentList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]UnitedDeployment, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeploymentList.
func (in *UnitedDeploymentList) DeepCopy() *UnitedDeploymentList {
if in == nil {
return nil
}
out := new(UnitedDeploymentList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *UnitedDeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UnitedDeploymentSpec) DeepCopyInto(out *UnitedDeploymentSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
in.Topology.DeepCopyInto(&out.Topology)
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeploymentSpec.
func (in *UnitedDeploymentSpec) DeepCopy() *UnitedDeploymentSpec {
if in == nil {
return nil
}
out := new(UnitedDeploymentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UnitedDeploymentStatus) DeepCopyInto(out *UnitedDeploymentStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
*out = new(int32)
**out = **in
}
if in.SubsetReplicas != nil {
in, out := &in.SubsetReplicas, &out.SubsetReplicas
*out = make(map[string]int32, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]UnitedDeploymentCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.UpdateStatus != nil {
in, out := &in.UpdateStatus, &out.UpdateStatus
*out = new(UpdateStatus)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeploymentStatus.
func (in *UnitedDeploymentStatus) DeepCopy() *UnitedDeploymentStatus {
if in == nil {
return nil
}
out := new(UnitedDeploymentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UnitedDeploymentUpdateStrategy) DeepCopyInto(out *UnitedDeploymentUpdateStrategy) {
*out = *in
if in.ManualUpdate != nil {
in, out := &in.ManualUpdate, &out.ManualUpdate
*out = new(ManualUpdate)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnitedDeploymentUpdateStrategy.
func (in *UnitedDeploymentUpdateStrategy) DeepCopy() *UnitedDeploymentUpdateStrategy {
if in == nil {
return nil
}
out := new(UnitedDeploymentUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UnorderedUpdateStrategy) DeepCopyInto(out *UnorderedUpdateStrategy) {
*out = *in
if in.PriorityStrategy != nil {
in, out := &in.PriorityStrategy, &out.PriorityStrategy
*out = new(pub.UpdatePriorityStrategy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnorderedUpdateStrategy.
func (in *UnorderedUpdateStrategy) DeepCopy() *UnorderedUpdateStrategy {
if in == nil {
return nil
}
out := new(UnorderedUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in UpdateScatterStrategy) DeepCopyInto(out *UpdateScatterStrategy) {
{
in := &in
*out = make(UpdateScatterStrategy, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateScatterStrategy.
func (in UpdateScatterStrategy) DeepCopy() UpdateScatterStrategy {
if in == nil {
return nil
}
out := new(UpdateScatterStrategy)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UpdateScatterTerm) DeepCopyInto(out *UpdateScatterTerm) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateScatterTerm.
func (in *UpdateScatterTerm) DeepCopy() *UpdateScatterTerm {
if in == nil {
return nil
}
out := new(UpdateScatterTerm)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UpdateStatus) DeepCopyInto(out *UpdateStatus) {
*out = *in
if in.CurrentPartitions != nil {
in, out := &in.CurrentPartitions, &out.CurrentPartitions
*out = make(map[string]int32, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStatus.
func (in *UpdateStatus) DeepCopy() *UpdateStatus {
if in == nil {
return nil
}
out := new(UpdateStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadSpread) DeepCopyInto(out *WorkloadSpread) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpread.
func (in *WorkloadSpread) DeepCopy() *WorkloadSpread {
if in == nil {
return nil
}
out := new(WorkloadSpread)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadSpread) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadSpreadList) DeepCopyInto(out *WorkloadSpreadList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]WorkloadSpread, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpreadList.
func (in *WorkloadSpreadList) DeepCopy() *WorkloadSpreadList {
if in == nil {
return nil
}
out := new(WorkloadSpreadList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadSpreadList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadSpreadScheduleStrategy) DeepCopyInto(out *WorkloadSpreadScheduleStrategy) {
*out = *in
if in.Adaptive != nil {
in, out := &in.Adaptive, &out.Adaptive
*out = new(AdaptiveWorkloadSpreadStrategy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpreadScheduleStrategy.
func (in *WorkloadSpreadScheduleStrategy) DeepCopy() *WorkloadSpreadScheduleStrategy {
if in == nil {
return nil
}
out := new(WorkloadSpreadScheduleStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadSpreadSpec) DeepCopyInto(out *WorkloadSpreadSpec) {
*out = *in
if in.TargetReference != nil {
in, out := &in.TargetReference, &out.TargetReference
*out = new(TargetReference)
**out = **in
}
if in.Subsets != nil {
in, out := &in.Subsets, &out.Subsets
*out = make([]WorkloadSpreadSubset, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.ScheduleStrategy.DeepCopyInto(&out.ScheduleStrategy)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpreadSpec.
func (in *WorkloadSpreadSpec) DeepCopy() *WorkloadSpreadSpec {
if in == nil {
return nil
}
out := new(WorkloadSpreadSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadSpreadStatus) DeepCopyInto(out *WorkloadSpreadStatus) {
*out = *in
if in.SubsetStatuses != nil {
in, out := &in.SubsetStatuses, &out.SubsetStatuses
*out = make([]WorkloadSpreadSubsetStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpreadStatus.
func (in *WorkloadSpreadStatus) DeepCopy() *WorkloadSpreadStatus {
if in == nil {
return nil
}
out := new(WorkloadSpreadStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadSpreadSubset) DeepCopyInto(out *WorkloadSpreadSubset) {
*out = *in
if in.RequiredNodeSelectorTerm != nil {
in, out := &in.RequiredNodeSelectorTerm, &out.RequiredNodeSelectorTerm
*out = new(v1.NodeSelectorTerm)
(*in).DeepCopyInto(*out)
}
if in.PreferredNodeSelectorTerms != nil {
in, out := &in.PreferredNodeSelectorTerms, &out.PreferredNodeSelectorTerms
*out = make([]v1.PreferredSchedulingTerm, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.MaxReplicas != nil {
in, out := &in.MaxReplicas, &out.MaxReplicas
*out = new(intstr.IntOrString)
**out = **in
}
in.Patch.DeepCopyInto(&out.Patch)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpreadSubset.
func (in *WorkloadSpreadSubset) DeepCopy() *WorkloadSpreadSubset {
if in == nil {
return nil
}
out := new(WorkloadSpreadSubset)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadSpreadSubsetCondition) DeepCopyInto(out *WorkloadSpreadSubsetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpreadSubsetCondition.
func (in *WorkloadSpreadSubsetCondition) DeepCopy() *WorkloadSpreadSubsetCondition {
if in == nil {
return nil
}
out := new(WorkloadSpreadSubsetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadSpreadSubsetStatus) DeepCopyInto(out *WorkloadSpreadSubsetStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]WorkloadSpreadSubsetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CreatingPods != nil {
in, out := &in.CreatingPods, &out.CreatingPods
*out = make(map[string]metav1.Time, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.DeletingPods != nil {
in, out := &in.DeletingPods, &out.DeletingPods
*out = make(map[string]metav1.Time, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpreadSubsetStatus.
func (in *WorkloadSpreadSubsetStatus) DeepCopy() *WorkloadSpreadSubsetStatus {
if in == nil {
return nil
}
out := new(WorkloadSpreadSubsetStatus)
in.DeepCopyInto(out)
return out
}
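// Usage sketch (hypothetical values; not part of the generated code):
// DeepCopy returns a fully independent clone, so mutating the copy never
// leaks back into the original object:
//
//	ws := &WorkloadSpread{}
//	ws.Spec.Subsets = []WorkloadSpreadSubset{{}}
//	clone := ws.DeepCopy()
//	clone.Spec.Subsets = nil // ws.Spec.Subsets still has one element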
0016_statement_stemmed_text.py
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_chatterbot', '0015_statement_persona'),
]
operations = [
migrations.AddField(
model_name='statement',
name='stemmed_text',
field=models.CharField(blank=True, max_length=400),
),
]
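# A minimal sketch of the standard Django workflow for applying this
# migration (assuming the app is installed under the label `django_chatterbot`):
#   python manage.py migrate django_chatterbot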
mod.rs
// Copyright 2020-2021 the Deno authors. All rights reserved. MIT license.
mod reader;
mod unicode;
mod validator;
pub use validator::{EcmaRegexValidator, EcmaVersion};
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn valid_flags() {
let validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_eq!(validator.validate_flags("gimuys"), Ok(()));
assert_eq!(validator.validate_flags("gimuy"), Ok(()));
assert_eq!(validator.validate_flags("gim"), Ok(()));
assert_eq!(validator.validate_flags("g"), Ok(()));
assert_eq!(validator.validate_flags("i"), Ok(()));
assert_eq!(validator.validate_flags("m"), Ok(()));
assert_eq!(validator.validate_flags("s"), Ok(()));
assert_eq!(validator.validate_flags("u"), Ok(()));
assert_eq!(validator.validate_flags("y"), Ok(()));
assert_eq!(validator.validate_flags("gy"), Ok(()));
assert_eq!(validator.validate_flags("iy"), Ok(()));
assert_eq!(validator.validate_flags("my"), Ok(()));
assert_eq!(validator.validate_flags("uy"), Ok(()));
}
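// Note: ES2018 accepts exactly the flags g, i, m, s, u, and y (`s`/dotAll was
// new in ES2018); later flags such as `d` (ES2022) and `v` (ES2024) should be
// rejected by a validator pinned to EcmaVersion::ES2018, like any other
// unknown letter.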
#[test]
fn duplicate_flags() {
let validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_eq!(
validator.validate_flags("gimgu"),
Err("Duplicated flag g".to_string())
);
assert_eq!(
validator.validate_flags("migg"),
Err("Duplicated flag g".to_string())
);
assert_eq!(
validator.validate_flags("igi"),
Err("Duplicated flag i".to_string())
);
assert_eq!(
validator.validate_flags("ii"),
Err("Duplicated flag i".to_string())
);
assert_eq!(
validator.validate_flags("mm"),
Err("Duplicated flag m".to_string())
);
assert_eq!(
validator.validate_flags("ss"),
Err("Duplicated flag s".to_string())
);
assert_eq!(
validator.validate_flags("uu"),
Err("Duplicated flag u".to_string())
);
assert_eq!(
validator.validate_flags("yy"),
Err("Duplicated flag y".to_string())
);
}
#[test]
fn invalid_flags() {
let validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_eq!(
validator.validate_flags("gimuf"),
Err("Invalid flag f".to_string())
);
assert_eq!(
validator.validate_flags("gI"),
Err("Invalid flag I".to_string())
);
assert_eq!(
validator.validate_flags("a"),
Err("Invalid flag a".to_string())
);
assert_eq!(
validator.validate_flags("1"),
Err("Invalid flag 1".to_string())
);
}
#[test]
fn validate_pattern_test() {
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_eq!(validator.validate_pattern("", false), Ok(()));
assert_eq!(validator.validate_pattern("[abc]de|fg", false), Ok(()));
assert_eq!(validator.validate_pattern("[abc]de|fg", true), Ok(()));
assert_eq!(validator.validate_pattern("^.$", false), Ok(()));
assert_eq!(validator.validate_pattern("^.$", true), Ok(()));
assert_eq!(validator.validate_pattern("foo\\[bar", false), Ok(()));
assert_eq!(validator.validate_pattern("foo\\[bar", true), Ok(()));
assert_eq!(validator.validate_pattern("\\w+\\s", false), Ok(()));
assert_eq!(validator.validate_pattern("(\\w+), (\\w+)", false), Ok(()));
assert_eq!(
validator.validate_pattern("\\/\\/.*|\\/\\*[^]*\\*\\/", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("(\\d{1,2})-(\\d{1,2})-(\\d{4})", false),
Ok(())
);
assert_eq!(
validator.validate_pattern(
"(?:\\d{3}|\\(\\d{3}\\))([-\\/\\.])\\d{3}\\1\\d{4}",
false
),
Ok(())
);
assert_eq!(validator.validate_pattern("https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b([-a-zA-Z0-9()@:%_\\+.~#?&//=]*)", false), Ok(()));
assert_eq!(
validator.validate_pattern("\\p{Script=Greek}", true),
Ok(())
);
assert_eq!(validator.validate_pattern("\\p{Alphabetic}", true), Ok(()));
assert_ne!(validator.validate_pattern("\\", false), Ok(()));
assert_ne!(validator.validate_pattern("a**", false), Ok(()));
assert_ne!(validator.validate_pattern("++a", false), Ok(()));
assert_ne!(validator.validate_pattern("?a", false), Ok(()));
assert_ne!(validator.validate_pattern("a***", false), Ok(()));
assert_ne!(validator.validate_pattern("a++", false), Ok(()));
assert_ne!(validator.validate_pattern("a+++", false), Ok(()));
assert_ne!(validator.validate_pattern("a???", false), Ok(()));
assert_ne!(validator.validate_pattern("a????", false), Ok(()));
assert_ne!(validator.validate_pattern("*a", false), Ok(()));
assert_ne!(validator.validate_pattern("**a", false), Ok(()));
assert_ne!(validator.validate_pattern("+a", false), Ok(()));
assert_ne!(validator.validate_pattern("[{-z]", false), Ok(()));
assert_ne!(validator.validate_pattern("[a--z]", false), Ok(()));
assert_ne!(validator.validate_pattern("0{2,1}", false), Ok(()));
assert_ne!(validator.validate_pattern("x{1}{1,}", false), Ok(()));
assert_ne!(validator.validate_pattern("x{1,2}{1}", false), Ok(()));
assert_ne!(validator.validate_pattern("x{1,}{1}", false), Ok(()));
assert_ne!(validator.validate_pattern("x{0,1}{1,}", false), Ok(()));
assert_ne!(validator.validate_pattern("\\1(\\P{P\0[}()/", true), Ok(()));
}
#[test]
fn character_range_order() {
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_ne!(validator.validate_pattern("^[z-a]$", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-ac-e]", false), Ok(()));
assert_ne!(validator.validate_pattern("[c-eb-a]", false), Ok(()));
assert_ne!(validator.validate_pattern("[a-dc-b]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\10b-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\ad-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\bd-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\Bd-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\db-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\Db-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\sb-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\Sb-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\wb-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\Wb-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\0b-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\td-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\nd-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\vd-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\fd-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\rd-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\c0001d-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\x0061d-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\u0061d-G]", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-G\\10]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\a]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\b]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\B]", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-G\\d]", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-G\\D]", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-G\\s]", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-G\\S]", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-G\\w]", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-G\\W]", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-G\\0]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\t]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\n]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\v]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\f]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\r]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\c0001]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\x0061]", false), Ok(()));
assert_ne!(validator.validate_pattern("[d-G\\u0061]", false), Ok(()));
}
#[test]
fn unicode_quantifier_without_atom() {
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_ne!(validator.validate_pattern("*", true), Ok(()));
assert_ne!(validator.validate_pattern("+", true), Ok(()));
assert_ne!(validator.validate_pattern("?", true), Ok(()));
assert_ne!(validator.validate_pattern("{1}", true), Ok(()));
assert_ne!(validator.validate_pattern("{1,}", true), Ok(()));
assert_ne!(validator.validate_pattern("{1,2}", true), Ok(()));
assert_ne!(validator.validate_pattern("*?", true), Ok(()));
assert_ne!(validator.validate_pattern("+?", true), Ok(()));
assert_ne!(validator.validate_pattern("??", true), Ok(()));
assert_ne!(validator.validate_pattern("{1}?", true), Ok(()));
assert_ne!(validator.validate_pattern("{1,}?", true), Ok(()));
assert_ne!(validator.validate_pattern("{1,2}?", true), Ok(()));
}
#[test]
fn unicode_incomplete_quantifier() {
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_ne!(validator.validate_pattern("a{", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1,", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1,2", true), Ok(()));
assert_ne!(validator.validate_pattern("{", true), Ok(()));
assert_ne!(validator.validate_pattern("{1", true), Ok(()));
assert_ne!(validator.validate_pattern("{1,", true), Ok(()));
assert_ne!(validator.validate_pattern("{1,2", true), Ok(()));
}
#[test]
fn unicode_single_bracket() {
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_ne!(validator.validate_pattern("(", true), Ok(()));
assert_ne!(validator.validate_pattern(")", true), Ok(()));
assert_ne!(validator.validate_pattern("[", true), Ok(()));
assert_ne!(validator.validate_pattern("]", true), Ok(()));
assert_ne!(validator.validate_pattern("{", true), Ok(()));
assert_ne!(validator.validate_pattern("}", true), Ok(()));
}
#[test]
fn unicode_escapes() {
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_eq!(validator.validate_pattern("\\u{10ffff}", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u{110000}", true), Ok(()));
assert_eq!(validator.validate_pattern("\\u{110000}", false), Ok(()));
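// Without the `u` flag, "\u{110000}" is not a code point escape: Annex B
// parses `\u` as an identity escape for the letter `u` and `{110000}` as a
// quantifier repeating it, so the pattern is accepted.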
assert_eq!(
validator.validate_pattern("foo\\ud803\\ude6dbar", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(\u{12345}|\u{23456}).\\1", true),
Ok(())
);
assert_eq!(validator.validate_pattern("\u{12345}{3}", true), Ok(()));
// unicode escapes in character classes
assert_eq!(
validator.validate_pattern("[\\u0062-\\u0066]oo", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("[\\u0062-\\u0066]oo", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("[\\u{0062}-\\u{0066}]oo", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("[\\u{62}-\\u{00000066}]oo", true),
Ok(())
);
// invalid escapes
assert_eq!(
validator.validate_pattern("first\\u\\x\\z\\8\\9second", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("[\\u\\x\\z\\8\\9]", false),
Ok(())
);
assert_ne!(validator.validate_pattern("/\\u/u", true), Ok(()));
assert_ne!(validator.validate_pattern("/\\u12/u", true), Ok(()));
assert_ne!(validator.validate_pattern("/\\ufoo/u", true), Ok(()));
assert_ne!(validator.validate_pattern("/\\x/u", true), Ok(()));
assert_ne!(validator.validate_pattern("/\\xfoo/u", true), Ok(()));
assert_ne!(validator.validate_pattern("/\\z/u", true), Ok(()));
assert_ne!(validator.validate_pattern("/\\8/u", true), Ok(()));
assert_ne!(validator.validate_pattern("/\\9/u", true), Ok(()));
}
#[test]
fn basic_valid() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/visitor/full.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_eq!(validator.validate_pattern("foo", false), Ok(()));
assert_eq!(validator.validate_pattern("foo|bar", false), Ok(()));
assert_eq!(validator.validate_pattern("||||", false), Ok(()));
assert_eq!(validator.validate_pattern("^|$|\\b|\\B", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=foo)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?!)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?!foo)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a)*", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a)+", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a)?", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a){", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a){}", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a){a}", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a){1}", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a){1,}", false), Ok(()));
assert_eq!(validator.validate_pattern("(?=a){1,2}", false), Ok(()));
assert_eq!(validator.validate_pattern("a*", false), Ok(()));
assert_eq!(validator.validate_pattern("a+", false), Ok(()));
assert_eq!(validator.validate_pattern("a?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{", false), Ok(()));
assert_eq!(validator.validate_pattern("a{}", false), Ok(()));
assert_eq!(validator.validate_pattern("a{a}", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1}", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1,}", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1,", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1,2}", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1,2", false), Ok(()));
assert_eq!(validator.validate_pattern("a{2,1", false), Ok(()));
assert_eq!(validator.validate_pattern("a*?", false), Ok(()));
assert_eq!(validator.validate_pattern("a+?", false), Ok(()));
assert_eq!(validator.validate_pattern("a??", false), Ok(()));
assert_eq!(validator.validate_pattern("a{?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{}?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{a}?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1}?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1,}?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1,?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1,2}?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{1,2?", false), Ok(()));
assert_eq!(validator.validate_pattern("a{2,1?", false), Ok(()));
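// The malformed quantifiers above (`a{`, `a{}`, `a{1,`, `a{2,1`, ...) are
// accepted only because Annex B's web-compatibility grammar treats an
// unmatched `{` as a literal character in non-`u` patterns; with the `u`
// flag the same inputs fail (see `unicode_incomplete_quantifier`).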
assert_eq!(validator.validate_pattern("👍🚀❇️", false), Ok(()));
assert_eq!(validator.validate_pattern("^", false), Ok(()));
assert_eq!(validator.validate_pattern("$", false), Ok(()));
assert_eq!(validator.validate_pattern(".", false), Ok(()));
assert_eq!(validator.validate_pattern("]", false), Ok(()));
assert_eq!(validator.validate_pattern("{", false), Ok(()));
assert_eq!(validator.validate_pattern("}", false), Ok(()));
assert_eq!(validator.validate_pattern("|", false), Ok(()));
assert_eq!(validator.validate_pattern("${1,2", false), Ok(()));
assert_eq!(validator.validate_pattern("\\1", false), Ok(()));
assert_eq!(validator.validate_pattern("(a)\\1", false), Ok(()));
assert_eq!(validator.validate_pattern("\\1(a)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?:a)\\1", false), Ok(()));
assert_eq!(validator.validate_pattern("(a)\\2", false), Ok(()));
assert_eq!(validator.validate_pattern("(?:a)\\2", false), Ok(()));
assert_eq!(
validator.validate_pattern("(a)(a)(a)(a)(a)(a)(a)(a)(a)(a)\\10", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("(a)(a)(a)(a)(a)(a)(a)(a)(a)(a)\\11", false),
Ok(())
);
assert_eq!(
validator
.validate_pattern("(a)(a)(a)(a)(a)(a)(a)(a)(a)(a)(a)\\11", false),
Ok(())
);
assert_eq!(validator.validate_pattern("(?:a)", false), Ok(()));
assert_eq!(validator.validate_pattern("\\d", false), Ok(()));
assert_eq!(validator.validate_pattern("\\D", false), Ok(()));
assert_eq!(validator.validate_pattern("\\s", false), Ok(()));
assert_eq!(validator.validate_pattern("\\S", false), Ok(()));
assert_eq!(validator.validate_pattern("\\w", false), Ok(()));
assert_eq!(validator.validate_pattern("\\W", false), Ok(()));
assert_eq!(validator.validate_pattern("\\f", false), Ok(()));
assert_eq!(validator.validate_pattern("\\n", false), Ok(()));
assert_eq!(validator.validate_pattern("\\r", false), Ok(()));
assert_eq!(validator.validate_pattern("\\t", false), Ok(()));
assert_eq!(validator.validate_pattern("\\v", false), Ok(()));
assert_eq!(validator.validate_pattern("\\cA", false), Ok(()));
assert_eq!(validator.validate_pattern("\\cz", false), Ok(()));
assert_eq!(validator.validate_pattern("\\c1", false), Ok(()));
assert_eq!(validator.validate_pattern("\\c", false), Ok(()));
assert_eq!(validator.validate_pattern("\\0", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u1", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u12", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u123", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u1234", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u12345", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u{", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u{z", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u{a}", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u{20", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u{20}", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u{10FFFF}", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u{110000}", false), Ok(()));
assert_eq!(validator.validate_pattern("\\u{00000001}", false), Ok(()));
assert_eq!(validator.validate_pattern("\\377", false), Ok(()));
assert_eq!(validator.validate_pattern("\\400", false), Ok(()));
assert_eq!(validator.validate_pattern("\\^", false), Ok(()));
assert_eq!(validator.validate_pattern("\\$", false), Ok(()));
assert_eq!(validator.validate_pattern("\\.", false), Ok(()));
assert_eq!(validator.validate_pattern("\\+", false), Ok(()));
assert_eq!(validator.validate_pattern("\\?", false), Ok(()));
assert_eq!(validator.validate_pattern("\\(", false), Ok(()));
assert_eq!(validator.validate_pattern("\\)", false), Ok(()));
assert_eq!(validator.validate_pattern("\\[", false), Ok(()));
assert_eq!(validator.validate_pattern("\\]", false), Ok(()));
assert_eq!(validator.validate_pattern("\\{", false), Ok(()));
assert_eq!(validator.validate_pattern("\\}", false), Ok(()));
assert_eq!(validator.validate_pattern("\\|", false), Ok(()));
assert_eq!(validator.validate_pattern("\\/", false), Ok(()));
assert_eq!(validator.validate_pattern("\\a", false), Ok(()));
assert_eq!(validator.validate_pattern("[]", false), Ok(()));
assert_eq!(validator.validate_pattern("[^-a-b-]", false), Ok(()));
assert_eq!(validator.validate_pattern("[-]", false), Ok(()));
assert_eq!(validator.validate_pattern("[a]", false), Ok(()));
assert_eq!(validator.validate_pattern("[--]", false), Ok(()));
assert_eq!(validator.validate_pattern("[-a]", false), Ok(()));
assert_eq!(validator.validate_pattern("[-a-]", false), Ok(()));
assert_eq!(validator.validate_pattern("[a-]", false), Ok(()));
assert_eq!(validator.validate_pattern("[a-b]", false), Ok(()));
assert_eq!(validator.validate_pattern("[-a-b-]", false), Ok(()));
assert_eq!(validator.validate_pattern("[---]", false), Ok(()));
assert_eq!(validator.validate_pattern("[a-b--/]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\b-\\n]", false), Ok(()));
assert_eq!(validator.validate_pattern("[b\\-a]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\d]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\D]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\s]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\S]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\w]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\W]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\f]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\n]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\r]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\t]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\v]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\cA]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\cz]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\c1]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\c]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\0]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\x]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\xz]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\x1]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\x12]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\x123]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u1]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u12]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u123]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u1234]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u12345]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{z]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{a}]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{20]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{20}]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{10FFFF}]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{110000}]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{00000001}]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\77]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\377]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\400]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\^]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\$]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\.]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\+]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\?]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\(]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\)]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\[]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\]]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\{]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\}]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\|]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\/]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\a]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\d-\\uFFFF]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\D-\\uFFFF]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\s-\\uFFFF]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\S-\\uFFFF]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\w-\\uFFFF]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\W-\\uFFFF]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u0000-\\d]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u0000-\\D]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u0000-\\s]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u0000-\\S]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u0000-\\w]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\u0000-\\W]", false), Ok(()));
assert_eq!(
validator.validate_pattern("[\\u0000-\\u0001]", false),
Ok(())
);
assert_eq!(validator.validate_pattern("[\\u{2-\\u{1}]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\a-\\z]", false), Ok(()));
assert_eq!(validator.validate_pattern("[0-9--/]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\c0-]", false), Ok(()));
assert_eq!(validator.validate_pattern("[\\c_]", false), Ok(()));
assert_eq!(validator.validate_pattern("^[0-9]*$", false), Ok(()));
assert_eq!(validator.validate_pattern("^[0-9]+$", false), Ok(()));
assert_eq!(validator.validate_pattern("^[a-zA-Z]*$", false), Ok(()));
assert_eq!(validator.validate_pattern("^[a-zA-Z]+$", false), Ok(()));
assert_eq!(validator.validate_pattern("^[0-9a-zA-Z]*$", false), Ok(()));
assert_eq!(
validator.validate_pattern("^[a-zA-Z0-9!-/:-@\\[-`{-~]*$", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("^([a-zA-Z0-9]{8,})$", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("^([a-zA-Z0-9]{6,8})$", false),
Ok(())
);
assert_eq!(validator.validate_pattern("^([0-9]{0,8})$", false), Ok(()));
assert_eq!(validator.validate_pattern("^[0-9]{8}$", false), Ok(()));
assert_eq!(validator.validate_pattern("^https?:\\/\\/", false), Ok(()));
assert_eq!(validator.validate_pattern("^\\d{3}-\\d{4}$", false), Ok(()));
assert_eq!(
validator.validate_pattern("^\\d{1,3}(.\\d{1,3}){3}$", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("^([1-9][0-9]*|0)(\\.[0-9]+)?$", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("^-?([1-9][0-9]*|0)(\\.[0-9]+)?$", false),
Ok(())
);
assert_eq!(validator.validate_pattern("^[ぁ-んー]*$", false), Ok(()));
assert_eq!(validator.validate_pattern("^[ァ-ンヴー]*$", false), Ok(()));
assert_eq!(validator.validate_pattern("^[ァ-ン゙゚\\-]*$", false), Ok(()));
assert_eq!(
validator.validate_pattern("^[^\\x20-\\x7e]*$", false),
Ok(())
);
assert_eq!(
validator.validate_pattern(
"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\.[a-zA-Z0-9-]+)*$",
false
),
Ok(())
);
assert_eq!(validator.validate_pattern("^((4\\d{3})|(5[1-5]\\d{2})|(6011))([- ])?\\d{4}([- ])?\\d{4}([- ])?\\d{4}|3[4,7]\\d{13}$", false), Ok(()));
assert_eq!(validator.validate_pattern("^\\s*|\\s*$", false), Ok(()));
assert_eq!(
validator.validate_pattern("[\\d][\\12-\\14]{1,}[^\\d]", false),
Ok(())
);
assert_eq!(validator.validate_pattern("([a ]\\b)*\\b", false), Ok(()));
assert_eq!(validator.validate_pattern("foo", true), Ok(()));
assert_eq!(validator.validate_pattern("foo|bar", true), Ok(()));
assert_eq!(validator.validate_pattern("||||", true), Ok(()));
assert_eq!(validator.validate_pattern("^|$|\\b|\\B", true), Ok(()));
assert_eq!(validator.validate_pattern("(?=)", true), Ok(()));
assert_eq!(validator.validate_pattern("(?=foo)", true), Ok(()));
assert_eq!(validator.validate_pattern("(?!)", true), Ok(()));
assert_eq!(validator.validate_pattern("(?!foo)", true), Ok(()));
assert_eq!(validator.validate_pattern("a*", true), Ok(()));
assert_eq!(validator.validate_pattern("a+", true), Ok(()));
assert_eq!(validator.validate_pattern("a?", true), Ok(()));
assert_eq!(validator.validate_pattern("a{1}", true), Ok(()));
assert_eq!(validator.validate_pattern("a{1,}", true), Ok(()));
assert_eq!(validator.validate_pattern("a{1,2}", true), Ok(()));
assert_eq!(validator.validate_pattern("a*?", true), Ok(()));
assert_eq!(validator.validate_pattern("a+?", true), Ok(()));
assert_eq!(validator.validate_pattern("a??", true), Ok(()));
assert_eq!(validator.validate_pattern("a{1}?", true), Ok(()));
assert_eq!(validator.validate_pattern("a{1,}?", true), Ok(()));
assert_eq!(validator.validate_pattern("a{1,2}?", true), Ok(()));
assert_eq!(validator.validate_pattern("👍🚀❇️", true), Ok(()));
assert_eq!(validator.validate_pattern("^", true), Ok(()));
assert_eq!(validator.validate_pattern("$", true), Ok(()));
assert_eq!(validator.validate_pattern(".", true), Ok(()));
assert_eq!(validator.validate_pattern("|", true), Ok(()));
assert_eq!(validator.validate_pattern("(a)\\1", true), Ok(()));
assert_eq!(validator.validate_pattern("\\1(a)", true), Ok(()));
assert_eq!(
validator.validate_pattern("(a)(a)(a)(a)(a)(a)(a)(a)(a)(a)\\10", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(a)(a)(a)(a)(a)(a)(a)(a)(a)(a)(a)\\11", true),
Ok(())
);
assert_eq!(validator.validate_pattern("(?:a)", true), Ok(()));
assert_eq!(validator.validate_pattern("\\d", true), Ok(()));
assert_eq!(validator.validate_pattern("\\D", true), Ok(()));
assert_eq!(validator.validate_pattern("\\s", true), Ok(()));
assert_eq!(validator.validate_pattern("\\S", true), Ok(()));
assert_eq!(validator.validate_pattern("\\w", true), Ok(()));
assert_eq!(validator.validate_pattern("\\W", true), Ok(()));
assert_eq!(validator.validate_pattern("\\f", true), Ok(()));
assert_eq!(validator.validate_pattern("\\n", true), Ok(()));
assert_eq!(validator.validate_pattern("\\r", true), Ok(()));
assert_eq!(validator.validate_pattern("\\t", true), Ok(()));
assert_eq!(validator.validate_pattern("\\v", true), Ok(()));
assert_eq!(validator.validate_pattern("\\cA", true), Ok(()));
assert_eq!(validator.validate_pattern("\\cz", true), Ok(()));
assert_eq!(validator.validate_pattern("\\0", true), Ok(()));
assert_eq!(validator.validate_pattern("\\u1234", true), Ok(()));
assert_eq!(validator.validate_pattern("\\u12345", true), Ok(()));
assert_eq!(validator.validate_pattern("\\u{a}", true), Ok(()));
assert_eq!(validator.validate_pattern("\\u{20}", true), Ok(()));
assert_eq!(validator.validate_pattern("\\u{10FFFF}", true), Ok(()));
assert_eq!(validator.validate_pattern("\\u{00000001}", true), Ok(()));
assert_eq!(validator.validate_pattern("\\^", true), Ok(()));
assert_eq!(validator.validate_pattern("\\$", true), Ok(()));
assert_eq!(validator.validate_pattern("\\.", true), Ok(()));
assert_eq!(validator.validate_pattern("\\+", true), Ok(()));
assert_eq!(validator.validate_pattern("\\?", true), Ok(()));
assert_eq!(validator.validate_pattern("\\(", true), Ok(()));
assert_eq!(validator.validate_pattern("\\)", true), Ok(()));
assert_eq!(validator.validate_pattern("\\[", true), Ok(()));
assert_eq!(validator.validate_pattern("\\]", true), Ok(()));
assert_eq!(validator.validate_pattern("\\{", true), Ok(()));
assert_eq!(validator.validate_pattern("\\}", true), Ok(()));
assert_eq!(validator.validate_pattern("\\|", true), Ok(()));
assert_eq!(validator.validate_pattern("\\/", true), Ok(()));
assert_eq!(validator.validate_pattern("[]", true), Ok(()));
assert_eq!(validator.validate_pattern("[^-a-b-]", true), Ok(()));
assert_eq!(validator.validate_pattern("[-]", true), Ok(()));
assert_eq!(validator.validate_pattern("[a]", true), Ok(()));
assert_eq!(validator.validate_pattern("[--]", true), Ok(()));
assert_eq!(validator.validate_pattern("[-a]", true), Ok(()));
assert_eq!(validator.validate_pattern("[-a-]", true), Ok(()));
assert_eq!(validator.validate_pattern("[a-]", true), Ok(()));
assert_eq!(validator.validate_pattern("[a-b]", true), Ok(()));
assert_eq!(validator.validate_pattern("[-a-b-]", true), Ok(()));
assert_eq!(validator.validate_pattern("[---]", true), Ok(()));
assert_eq!(validator.validate_pattern("[a-b--/]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\b-\\n]", true), Ok(()));
assert_eq!(validator.validate_pattern("[b\\-a]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\d]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\D]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\s]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\S]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\w]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\W]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\f]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\n]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\r]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\t]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\v]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\cA]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\cz]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\0]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\x12]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\x123]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\u1234]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\u12345]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{a}]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{20}]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{10FFFF}]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\u{00000001}]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\^]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\$]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\.]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\+]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\?]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\(]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\)]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\[]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\]]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\{]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\}]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\|]", true), Ok(()));
assert_eq!(validator.validate_pattern("[\\/]", true), Ok(()));
assert_eq!(
validator.validate_pattern("[\\u0000-\\u0001]", true),
Ok(())
);
assert_eq!(validator.validate_pattern("[\\u{1}-\\u{2}]", true), Ok(()));
assert_eq!(validator.validate_pattern("[0-9--/]", true), Ok(()));
assert_eq!(validator.validate_pattern("[🌷-🌸]", true), Ok(()));
assert_eq!(
validator.validate_pattern("[\\u0000-🌸-\\u0000]", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("[\\u0000-\\u{1f338}-\\u0000]", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("[\\u0000-\\ud83c\\udf38-\\u0000]", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("[\\uD834\\uDF06-\\uD834\\uDF08a-z]", true),
Ok(())
);
assert_eq!(validator.validate_pattern("^[0-9]*$", true), Ok(()));
assert_eq!(validator.validate_pattern("^[0-9]+$", true), Ok(()));
assert_eq!(validator.validate_pattern("^[a-zA-Z]*$", true), Ok(()));
assert_eq!(validator.validate_pattern("^[a-zA-Z]+$", true), Ok(()));
assert_eq!(validator.validate_pattern("^[0-9a-zA-Z]*$", true), Ok(()));
assert_eq!(
validator.validate_pattern("^[a-zA-Z0-9!-/:-@\\[-`{-~]*$", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("^([a-zA-Z0-9]{8,})$", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("^([a-zA-Z0-9]{6,8})$", true),
Ok(())
);
assert_eq!(validator.validate_pattern("^([0-9]{0,8})$", true), Ok(()));
assert_eq!(validator.validate_pattern("^[0-9]{8}$", true), Ok(()));
assert_eq!(validator.validate_pattern("^https?:\\/\\/", true), Ok(()));
assert_eq!(validator.validate_pattern("^\\d{3}-\\d{4}$", true), Ok(()));
assert_eq!(
validator.validate_pattern("^\\d{1,3}(.\\d{1,3}){3}$", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("^([1-9][0-9]*|0)(\\.[0-9]+)?$", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("^-?([1-9][0-9]*|0)(\\.[0-9]+)?$", true),
Ok(())
);
assert_eq!(validator.validate_pattern("^[ぁ-んー]*$", true), Ok(()));
assert_eq!(validator.validate_pattern("^[ァ-ンヴー]*$", true), Ok(()));
assert_eq!(validator.validate_pattern("^[ァ-ン゙゚\\-]*$", true), Ok(()));
assert_eq!(
validator.validate_pattern("^[^\\x20-\\x7e]*$", true),
Ok(())
);
assert_eq!(
validator.validate_pattern(
"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\.[a-zA-Z0-9-]+)*$",
true
),
Ok(())
);
assert_eq!(validator.validate_pattern("^((4\\d{3})|(5[1-5]\\d{2})|(6011))([- ])?\\d{4}([- ])?\\d{4}([- ])?\\d{4}|3[4,7]\\d{13}$", true), Ok(()));
assert_eq!(validator.validate_pattern("^\\s*|\\s*$", true), Ok(()));
assert_eq!(validator.validate_pattern("(?<=a)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?<=a)", true), Ok(()));
assert_eq!(validator.validate_pattern("(?<!a)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?<!a)", true), Ok(()));
assert_eq!(
validator.validate_pattern("(?<=(?<a>\\w){3})f", true),
Ok(())
);
assert_eq!(validator.validate_pattern("((?<=\\w{3}))f", true), Ok(()));
assert_eq!(
validator.validate_pattern("(?<a>(?<=\\w{3}))f", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<!(?<a>\\d){3})f", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<!(?<a>\\D){3})f|f", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<a>(?<!\\D{3}))f|f", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<=(?<a>\\w){3})f", false),
Ok(())
);
assert_eq!(validator.validate_pattern("((?<=\\w{3}))f", false), Ok(()));
assert_eq!(
validator.validate_pattern("(?<a>(?<=\\w{3}))f", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<!(?<a>\\d){3})f", false),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<=(?<fst>.)|(?<snd>.))", true),
Ok(())
);
assert_eq!(validator.validate_pattern("(a)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?<a>)", false), Ok(()));
assert_eq!(validator.validate_pattern("\\k", false), Ok(()));
assert_eq!(validator.validate_pattern("\\k<a>", false), Ok(()));
assert_eq!(validator.validate_pattern("(?<a>a)\\k<a>", false), Ok(()));
assert_eq!(validator.validate_pattern("(?<a>a)\\k<a>", true), Ok(()));
assert_eq!(validator.validate_pattern("(?<a>a)\\1", false), Ok(()));
assert_eq!(validator.validate_pattern("(?<a>a)\\1", true), Ok(()));
assert_eq!(validator.validate_pattern("(?<a>a)\\2", false), Ok(()));
assert_eq!(validator.validate_pattern("(?<a>a)(?<b>a)", false), Ok(()));
assert_eq!(validator.validate_pattern("(?<a>a)(?<b>a)", true), Ok(()));
assert_eq!(validator.validate_pattern("\\k<a>(?<a>a)", false), Ok(()));
assert_eq!(validator.validate_pattern("\\k<a>(?<a>a)", true), Ok(()));
assert_eq!(validator.validate_pattern("\\1(?<a>a)", false), Ok(()));
assert_eq!(validator.validate_pattern("\\1(?<a>a)", true), Ok(()));
assert_eq!(
validator.validate_pattern("(?<$abc>a)\\k<$abc>", true),
Ok(())
);
assert_eq!(validator.validate_pattern("(?<あ>a)\\k<あ>", true), Ok(()));
assert_eq!(
validator.validate_pattern("(?<𠮷>a)\\k<\\u{20bb7}>", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<\\uD842\\uDFB7>a)\\k<\\u{20bb7}>", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<\\u{20bb7}>a)\\k<\\uD842\\uDFB7>", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<abc>a)\\k<\\u0061\\u0062\\u0063>", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("(?<\\u0061\\u0062\\u0063>a)\\k<abc>", true),
Ok(())
);
assert_eq!(
validator.validate_pattern(
"(?<\\u0061\\u0062\\u0063>a)\\k<\\u{61}\\u{62}\\u{63}>",
true
),
Ok(())
);
assert_eq!(validator.validate_pattern("(?<a1>a)\\k<a1>", true), Ok(()));
assert_eq!(validator.validate_pattern("\\p", false), Ok(()));
assert_eq!(validator.validate_pattern("\\p{", false), Ok(()));
assert_eq!(validator.validate_pattern("\\p{ASCII", false), Ok(()));
assert_eq!(validator.validate_pattern("\\p{ASCII}", false), Ok(()));
assert_eq!(validator.validate_pattern("\\p{ASCII}", true), Ok(()));
assert_eq!(validator.validate_pattern("\\p{Emoji}", true), Ok(()));
assert_eq!(
validator.validate_pattern("\\p{General_Category=Letter}", true),
Ok(())
);
assert_eq!(
validator.validate_pattern("\\p{Script=Hiragana}", true),
Ok(())
);
assert_eq!(
validator.validate_pattern(
"[\\p{Script=Hiragana}\\-\\p{Script=Katakana}]",
true
),
Ok(())
);
assert_eq!(validator.validate_pattern("\\P{Letter}", true), Ok(()));
}
#[test]
fn basic_invalid() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/basic-invalid.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES5);
assert_ne!(validator.validate_pattern("(", false), Ok(()));
assert_ne!(validator.validate_pattern("(?", false), Ok(()));
assert_ne!(validator.validate_pattern("(?=", false), Ok(()));
assert_ne!(validator.validate_pattern("(?=foo", false), Ok(()));
assert_ne!(validator.validate_pattern("(?!", false), Ok(()));
assert_ne!(validator.validate_pattern("(?!foo", false), Ok(()));
assert_ne!(validator.validate_pattern("a{2,1}", false), Ok(()));
assert_ne!(validator.validate_pattern("(a{2,1}", false), Ok(()));
assert_ne!(validator.validate_pattern("a{2,1}?", false), Ok(()));
assert_ne!(validator.validate_pattern("(*)", false), Ok(()));
assert_ne!(validator.validate_pattern("+", false), Ok(()));
assert_ne!(validator.validate_pattern("?", false), Ok(()));
assert_ne!(validator.validate_pattern(")", false), Ok(()));
assert_ne!(validator.validate_pattern("[", false), Ok(()));
assert_ne!(validator.validate_pattern("^*", false), Ok(()));
assert_ne!(validator.validate_pattern("$*", false), Ok(()));
assert_ne!(validator.validate_pattern("${1,2}", false), Ok(()));
assert_ne!(validator.validate_pattern("${2,1}", false), Ok(()));
assert_ne!(validator.validate_pattern("\\2(a)(", false), Ok(()));
assert_ne!(validator.validate_pattern("(?a", false), Ok(()));
assert_ne!(validator.validate_pattern("(?a)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?:", false), Ok(()));
assert_ne!(validator.validate_pattern("(?:a", false), Ok(()));
assert_ne!(validator.validate_pattern("(:a", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-a]", false), Ok(()));
assert_ne!(validator.validate_pattern("[a-b--+]", false), Ok(()));
assert_ne!(
validator.validate_pattern("[\\u0001-\\u0000]", false),
Ok(())
);
assert_ne!(validator.validate_pattern("[\\u{1}-\\u{2}]", false), Ok(()));
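// Without `u`, "\u{1}" is just the characters `u { 1 }`, so this class
// contains the descending range `}`-`u` (U+007D to U+0075) and is rejected
// for the same reason as "[b-a]"; with the `u` flag it is the valid range
// U+0001-U+0002 (covered in `basic_valid`).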
assert_ne!(validator.validate_pattern("[\\u{2}-\\u{1}]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\z-\\a]", false), Ok(()));
assert_ne!(validator.validate_pattern("[0-9--+]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\c-a]", false), Ok(()));
assert_ne!(validator.validate_pattern("[🌷-🌸]", false), Ok(()));
assert_ne!(validator.validate_pattern("[🌸-🌷]", false), Ok(()));
assert_ne!(
validator.validate_pattern("[\\uD834\\uDF06-\\uD834\\uDF08a-z]", false),
Ok(())
);
}
#[test]
fn basic_invalid_2015() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/basic-invalid-2015.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2015);
assert_ne!(validator.validate_pattern("(", false), Ok(()));
assert_ne!(validator.validate_pattern("(?", false), Ok(()));
assert_ne!(validator.validate_pattern("(?=", false), Ok(()));
assert_ne!(validator.validate_pattern("(?=foo", false), Ok(()));
assert_ne!(validator.validate_pattern("(?!", false), Ok(()));
assert_ne!(validator.validate_pattern("(?!foo", false), Ok(()));
assert_ne!(validator.validate_pattern("a{2,1}", false), Ok(()));
assert_ne!(validator.validate_pattern("(a{2,1}", false), Ok(()));
assert_ne!(validator.validate_pattern("a{2,1}?", false), Ok(()));
assert_ne!(validator.validate_pattern("(*)", false), Ok(()));
assert_ne!(validator.validate_pattern("+", false), Ok(()));
assert_ne!(validator.validate_pattern("?", false), Ok(()));
assert_ne!(validator.validate_pattern(")", false), Ok(()));
assert_ne!(validator.validate_pattern("[", false), Ok(()));
assert_ne!(validator.validate_pattern("^*", false), Ok(()));
assert_ne!(validator.validate_pattern("$*", false), Ok(()));
assert_ne!(validator.validate_pattern("${1,2}", false), Ok(()));
assert_ne!(validator.validate_pattern("${2,1}", false), Ok(()));
assert_ne!(validator.validate_pattern("\\2(a)(", false), Ok(()));
assert_ne!(validator.validate_pattern("(?a", false), Ok(()));
assert_ne!(validator.validate_pattern("(?a)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?:", false), Ok(()));
assert_ne!(validator.validate_pattern("(?:a", false), Ok(()));
assert_ne!(validator.validate_pattern("(:a", false), Ok(()));
assert_ne!(validator.validate_pattern("[b-a]", false), Ok(()));
assert_ne!(validator.validate_pattern("[a-b--+]", false), Ok(()));
assert_ne!(
validator.validate_pattern("[\\u0001-\\u0000]", false),
Ok(())
);
assert_ne!(validator.validate_pattern("[\\u{1}-\\u{2}]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\u{2}-\\u{1}]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\z-\\a]", false), Ok(()));
assert_ne!(validator.validate_pattern("[0-9--+]", false), Ok(()));
assert_ne!(validator.validate_pattern("[\\c-a]", false), Ok(()));
assert_ne!(validator.validate_pattern("[🌷-🌸]", false), Ok(()));
assert_ne!(
validator.validate_pattern("[\\u0000-🌸-\\u0000]", false),
Ok(())
);
assert_ne!(
validator.validate_pattern("[\\u0000-\\ud83c\\udf38-\\u0000]", false),
Ok(())
);
assert_ne!(validator.validate_pattern("[🌸-🌷]", false), Ok(()));
assert_ne!(
validator.validate_pattern("[\\uD834\\uDF06-\\uD834\\uDF08a-z]", false),
Ok(())
);
}
#[test]
fn basic_invalid_2015_unicode() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/basic-invalid-2015-u.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2015);
assert_ne!(validator.validate_pattern("(", true), Ok(()));
assert_ne!(validator.validate_pattern("(?", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=foo", true), Ok(()));
assert_ne!(validator.validate_pattern("(?!", true), Ok(()));
assert_ne!(validator.validate_pattern("(?!foo", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a)*", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a)+", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a)?", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a){", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a){}", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a){a}", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a){1}", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a){1,}", true), Ok(()));
assert_ne!(validator.validate_pattern("(?=a){1,2}", true), Ok(()));
assert_ne!(validator.validate_pattern("a{", true), Ok(()));
assert_ne!(validator.validate_pattern("a{}", true), Ok(()));
assert_ne!(validator.validate_pattern("a{a}", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1,", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1,2", true), Ok(()));
assert_ne!(validator.validate_pattern("a{2,1}", true), Ok(()));
assert_ne!(validator.validate_pattern("a{2,1", true), Ok(()));
assert_ne!(validator.validate_pattern("(a{2,1}", true), Ok(()));
assert_ne!(validator.validate_pattern("a{?", true), Ok(()));
assert_ne!(validator.validate_pattern("a{}?", true), Ok(()));
assert_ne!(validator.validate_pattern("a{a}?", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1?", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1,?", true), Ok(()));
assert_ne!(validator.validate_pattern("a{1,2?", true), Ok(()));
assert_ne!(validator.validate_pattern("a{2,1}?", true), Ok(()));
assert_ne!(validator.validate_pattern("a{2,1?", true), Ok(()));
assert_ne!(validator.validate_pattern("(*)", true), Ok(()));
assert_ne!(validator.validate_pattern("+", true), Ok(()));
assert_ne!(validator.validate_pattern("?", true), Ok(()));
assert_ne!(validator.validate_pattern(")", true), Ok(()));
assert_ne!(validator.validate_pattern("[", true), Ok(()));
assert_ne!(validator.validate_pattern("]", true), Ok(()));
assert_ne!(validator.validate_pattern("{", true), Ok(()));
assert_ne!(validator.validate_pattern("}", true), Ok(()));
assert_ne!(validator.validate_pattern("^*", true), Ok(()));
assert_ne!(validator.validate_pattern("$*", true), Ok(()));
assert_ne!(validator.validate_pattern("${1,2", true), Ok(()));
assert_ne!(validator.validate_pattern("${1,2}", true), Ok(()));
assert_ne!(validator.validate_pattern("${2,1}", true), Ok(()));
assert_ne!(validator.validate_pattern("\\1", true), Ok(()));
assert_ne!(validator.validate_pattern("\\2(a)(", true), Ok(()));
assert_ne!(validator.validate_pattern("(?:a)\\1", true), Ok(()));
assert_ne!(validator.validate_pattern("(a)\\2", true), Ok(()));
assert_ne!(validator.validate_pattern("(?:a)\\2", true), Ok(()));
assert_ne!(
validator.validate_pattern("(a)(a)(a)(a)(a)(a)(a)(a)(a)(a)\\11", true),
Ok(())
);
assert_ne!(validator.validate_pattern("(?a", true), Ok(()));
assert_ne!(validator.validate_pattern("(?a)", true), Ok(()));
assert_ne!(validator.validate_pattern("(?:", true), Ok(()));
assert_ne!(validator.validate_pattern("(?:a", true), Ok(()));
assert_ne!(validator.validate_pattern("(:a", true), Ok(()));
assert_ne!(validator.validate_pattern("\\c1", true), Ok(()));
assert_ne!(validator.validate_pattern("\\c", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u1", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u12", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u123", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u{", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u{z", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u{20", true), Ok(()));
assert_ne!(validator.validate_pattern("\\u{110000}", true), Ok(()));
assert_ne!(validator.validate_pattern("\\377", true), Ok(()));
assert_ne!(validator.validate_pattern("\\400", true), Ok(()));
assert_ne!(validator.validate_pattern("\\a", true), Ok(()));
assert_ne!(validator.validate_pattern("[b-a]", true), Ok(()));
assert_ne!(validator.validate_pattern("[a-b--+]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\c1]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\c]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\x]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\xz]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\x1]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u1]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u12]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u123]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u{]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u{z]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u{20]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u{110000}]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\77]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\377]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\400]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\a]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\d-\\uFFFF]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\D-\\uFFFF]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\s-\\uFFFF]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\S-\\uFFFF]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\w-\\uFFFF]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\W-\\uFFFF]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u0000-\\d]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u0000-\\D]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u0000-\\s]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u0000-\\S]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u0000-\\w]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u0000-\\W]", true), Ok(()));
assert_ne!(
validator.validate_pattern("[\\u0001-\\u0000]", true),
Ok(())
);
assert_ne!(validator.validate_pattern("[\\u{2}-\\u{1}]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\u{2-\\u{1}]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\a-\\z]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\z-\\a]", true), Ok(()));
assert_ne!(validator.validate_pattern("[0-9--+]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\c-a]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\c0-]", true), Ok(()));
assert_ne!(validator.validate_pattern("[\\c_]", true), Ok(()));
assert_ne!(validator.validate_pattern("[🌸-🌷]", true), Ok(()));
assert_ne!(
validator.validate_pattern("[\\d][\\12-\\14]{1,}[^\\d]", true),
Ok(())
);
}
#[test]
fn lookbehind_assertion_invalid_2017() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/lookbehind-assertion-invalid-2017.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2017);
assert_ne!(validator.validate_pattern("(?<a)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a)", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a)", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a)", true), Ok(()));
}
#[test]
fn lookbehind_assertion_invalid_2018() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/lookbehind-assertion-invalid-2018.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_ne!(validator.validate_pattern("(?<a)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a)", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a)?", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a)?", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a)+", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a)+", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a)*", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a)*", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a){1}", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<=a){1}", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a)?", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a)?", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a)+", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a)+", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a)*", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a)*", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a){1}", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<!a){1}", true), Ok(()));
}
#[test]
fn named_capturing_group_invalid_2017() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/named-capturing-group-invalid-2017.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2017);
assert_ne!(validator.validate_pattern("\\k", true), Ok(()));
assert_ne!(validator.validate_pattern("\\k<a>", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<a", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<a", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<a>", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<a>", true), Ok(()));
}
#[test]
fn named_capturing_group_invalid_2018() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/named-capturing-group-invalid-2018.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_ne!(validator.validate_pattern("(?a", false), Ok(()));
assert_ne!(validator.validate_pattern("(?a)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a)", false), Ok(()));
assert_ne!(validator.validate_pattern("\\k", true), Ok(()));
assert_ne!(validator.validate_pattern("\\k<a>", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<a", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<a", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\2", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<b>", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)\\k<b>", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)(?<a>a)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<a>a)(?<a>a)", true), Ok(()));
assert_ne!(
validator.validate_pattern("(?<a>a)(?<\\u{61}>a)", true),
Ok(())
);
assert_ne!(
validator.validate_pattern("(?<a>a)(?<\\u0061>a)", true),
Ok(())
);
assert_ne!(validator.validate_pattern("(?<☀>a)\\k<☀>", true), Ok(()));
assert_ne!(
validator.validate_pattern("(?<\\u0020>a)\\k<\\u0020>", true),
Ok(())
);
assert_ne!(
validator.validate_pattern("(?<\\u0061\\u0062\\u0063>a)\\k<abd>", true),
Ok(())
);
assert_ne!(validator.validate_pattern("(?<11>a)\\k<11>", true), Ok(()));
}
#[test]
fn unicode_group_names_invalid_2020() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/unicode-group-names-invalid.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2020);
assert_ne!(
validator.validate_pattern("(?<\\ud83d\\ude80>.)", false),
Ok(())
);
assert_ne!(
validator.validate_pattern("(?<\\ud83d\\ude80>.)", true),
Ok(())
);
assert_ne!(
validator.validate_pattern("(?<\\u{1f680}>.)", false),
Ok(())
);
assert_ne!(validator.validate_pattern("(?<\\u{1f680}>.)", true), Ok(()));
assert_ne!(validator.validate_pattern("(?<🚀>.)", false), Ok(()));
assert_ne!(validator.validate_pattern("(?<🚀>.)", true), Ok(()));
}
#[test]
fn unicode_property_escape_invalid_2017() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/unicode-property-escape-invalid-2017.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2017);
assert_ne!(validator.validate_pattern("\\p", true), Ok(()));
assert_ne!(validator.validate_pattern("\\p{", true), Ok(()));
assert_ne!(validator.validate_pattern("\\p{ASCII", true), Ok(()));
assert_ne!(validator.validate_pattern("\\p{ASCII}", true), Ok(()));
}
#[test]
fn unicode_property_escape_invalid_2018() {
// source: https://github.com/mysticatea/regexpp/blob/master/test/fixtures/parser/literal/unicode-property-escape-invalid-2018.json
let mut validator = EcmaRegexValidator::new(EcmaVersion::ES2018);
assert_ne!(validator.validate_pattern("\\p", true), Ok(()));
assert_ne!(validator.validate_pattern("\\p{", true), Ok(()));
assert_ne!(validator.validate_pattern("\\p{ASCII", true), Ok(()));
assert_ne!(
validator.validate_pattern("\\p{General_Category}", true),
Ok(())
);
assert_ne!(
validator.validate_pattern("\\p{General_Category=}", true),
Ok(())
);
assert_ne!(
validator.validate_pattern("\\p{General_Category", true),
Ok(())
);
assert_ne!(
validator.validate_pattern("\\p{General_Category=", true),
Ok(())
);
assert_ne!(
validator.validate_pattern("\\p{General_Category=Letter", true),
Ok(())
);
assert_ne!(
validator.validate_pattern("\\p{General_Category=Hiragana}", true),
Ok(())
);
assert_ne!(
validator
.validate_pattern("[\\p{Script=Hiragana}-\\p{Script=Katakana}]", true),
Ok(())
);
}
}
|
assert_eq!(
    validator.validate_pattern("(?<a>(?<!\\D{3}))f|f", false),
    Ok(())
);
|
480p_352.ts
|
version https://git-lfs.github.com/spec/v1
oid sha256:d66cdf8f225191e7b25ebef02781c9330cb5ef2381e7497b2da43bcbe327051d
size 557608
|
Random Point in Non-overlapping Rectangles.py
|
# Random Point in Non-overlapping Rectangles
'''
Given a list of non-overlapping axis-aligned rectangles rects, write a function pick which randomly and uniformly picks an integer point in the space covered by the rectangles.
Note:
An integer point is a point that has integer coordinates.
A point on the perimeter of a rectangle is included in the space covered by the rectangles.
ith rectangle = rects[i] = [x1,y1,x2,y2], where [x1, y1] are the integer coordinates of the bottom-left corner, and [x2, y2] are the integer coordinates of the top-right corner.
length and width of each rectangle does not exceed 2000.
1 <= rects.length <= 100
pick return a point as an array of integer coordinates [p_x, p_y]
pick is called at most 10000 times.
Example 1:
Input:
["Solution","pick","pick","pick"]
[[[[1,1,5,5]]],[],[],[]]
Output:
[null,[4,1],[4,1],[3,3]]
Example 2:
Input:
["Solution","pick","pick","pick","pick","pick"]
[[[[-2,-2,-1,-1],[1,0,3,0]]],[],[],[],[],[]]
Output:
[null,[-1,-2],[2,0],[-2,-1],[3,0],[-2,-2]]
Explanation of Input Syntax:
The input is two lists: the subroutines called and their arguments. Solution's constructor has one argument, the array of rectangles rects.
pick has no arguments. Arguments are always wrapped with a list, even if there aren't any.
'''
import random
from typing import List
class Solution:
def __init__(self, rects: List[List[int]]):
self.rects = rects
self.weights = []
s = 0
for x1, y1, x2, y2 in rects:
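            # Weight each rectangle by the number of integer points it contains;
            # the +1 on each side makes the perimeter inclusive, e.g. [1,1,5,5]
            # covers 5 * 5 = 25 points.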
w = (x2-x1+1)*(y2-y1+1)
self.weights.append(w)
s+=w
self.weights = [x/s for x in self.weights]
def pick(self) -> List[int]:
rectangle = random.choices(population = self.rects, weights = self.weights, k=1)[0]
x1, y1, x2, y2 = rectangle
return [random.randint(x1,x2), random.randint(y1,y2)]
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
|
pair_test.go
|
package ice
import (
"fmt"
"net"
"sort"
"testing"
)
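// The expected values below are consistent with the candidate-pair priority
// formula of RFC 8445, section 6.1.2.3, with G the controlling- and D the
// controlled-agent candidate priority:
//
//	priority = 2^32 * min(G, D) + 2 * max(G, D) + (G > D ? 1 : 0)
//
// e.g. for G=2, D=1: 2^32 * 1 + 2 * 2 + 1 = 4294967301.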
func TestPairPriority(t *testing.T) {
for _, tc := range []struct {
G, D int
Value int64
}{
{0, 0, 0},
{1, 1, 4294967298},
{1, 2, 4294967300},
{2, 1, 4294967301},
} {
t.Run(fmt.Sprintf("%d_%d", tc.G, tc.D), func(t *testing.T) {
if v := PairPriority(tc.G, tc.D); v != tc.Value {
t.Errorf("%d (got) != %d (expected)", v, tc.Value)
}
})
}
}
func TestPair_Foundation(t *testing.T) {
p := Pair{
Local: Candidate{
Foundation: make([]byte, foundationLength),
},
Remote: Candidate{
Foundation: make([]byte, foundationLength),
},
}
p.SetFoundation()
f := p.Foundation
if len(f) != foundationLength*2 {
t.Error("bad length")
}
}
func TestPairs(t *testing.T) {
pairs := Pairs{
{Priority: 4},
{Priority: 3},
{Priority: 100},
{Priority: 0, ComponentID: 2},
		{Priority: 0, ComponentID: 1},
{Priority: 4},
{Priority: 5},
{Priority: 9},
{Priority: 8},
}
sort.Sort(pairs)
expectedOrder := []struct {
priority int64
component int
}{
{100, 0},
{9, 0},
{8, 0},
{5, 0},
{4, 0},
{4, 0},
{3, 0},
{0, 1},
{0, 2},
}
for i, p := range pairs {
		if p.Priority != expectedOrder[i].priority {
			t.Errorf("p[%d]: %d (got) != %d (expected)", i, p.Priority, expectedOrder[i].priority)
		}
		if p.ComponentID != expectedOrder[i].component {
			t.Errorf("p[%d] component: %d (got) != %d (expected)", i, p.ComponentID, expectedOrder[i].component)
		}
}
}
func TestNewPairs(t *testing.T) {
for _, tc := range []struct {
Name string
Local Candidates
Remote Candidates
Result Pairs
}{
{
Name: "Blank",
},
{
Name: "No pairs",
Local: Candidates{
{
Addr: Addr{
IP: net.ParseIP("1.1.1.1"),
},
},
},
Remote: Candidates{
{
Addr: Addr{
IP: net.ParseIP("2001:11:12:13:14:15:16:17"),
},
},
},
},
{
Name: "Simple",
Local: Candidates{
{
Addr: Addr{
IP: net.ParseIP("1.1.1.1"),
},
},
},
Remote: Candidates{
{
Addr: Addr{
IP: net.ParseIP("1.1.1.2"),
},
},
},
Result: Pairs{
{
Local: Candidate{
Addr: Addr{
IP: net.ParseIP("1.1.1.1"),
},
},
Remote: Candidate{
Addr: Addr{
IP: net.ParseIP("1.1.1.2"),
},
},
},
},
},
} {
t.Run(tc.Name, func(t *testing.T) {
got := NewPairs(tc.Local, tc.Remote)
if len(got) != len(tc.Result) {
t.Fatalf("bad length: %d (got) != %d (expected)", len(got), len(tc.Result))
}
for i := range tc.Result {
expectedAddr := tc.Result[i].Remote.Addr
gotAddr := got[i].Remote.Addr
if !gotAddr.Equal(expectedAddr) {
t.Errorf("[%d]: remote addr mismatch: %s (got) != %s (expected)", i, gotAddr, expectedAddr)
}
expectedAddr = tc.Result[i].Local.Addr
gotAddr = got[i].Local.Addr
if !gotAddr.Equal(expectedAddr) {
t.Errorf("[%d]: local addr mismatch: %s (got) != %s (expected)", i, gotAddr, expectedAddr)
}
}
})
}
}
|
face_recognition_svm.py
|
# Train multiple images per person
# Find and recognize faces in an image using a SVC with scikit-learn
"""
Structure:
<test_image>.jpg
<train_dir>/
<person_1>/
<person_1_face-1>.jpg
<person_1_face-2>.jpg
.
.
<person_1_face-n>.jpg
<person_2>/
<person_2_face-1>.jpg
<person_2_face-2>.jpg
.
.
<person_2_face-n>.jpg
.
.
<person_n>/
<person_n_face-1>.jpg
<person_n_face-2>.jpg
.
.
<person_n_face-n>.jpg
"""
import face_recognition
from sklearn import svm
import os
# Training the SVC classifier
# The training data would be all the face encodings from all the known images and the labels are their names
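# Each face encoding produced by face_recognition is a 128-dimensional vector;
# encodings[i] and names[i] together form one labeled training sample for the SVC.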
encodings = []
names = []

# Training directory
train_dir = os.listdir('/train_dir/')
# Loop through each person in the training directory
for person in train_dir:
pix = os.listdir("/train_dir/" + person)
# Loop through each training image for the current person
for person_img in pix:
# Get the face encodings for the face in each image file
face = face_recognition.load_image_file("/train_dir/" + person + "/" + person_img)
face_bounding_boxes = face_recognition.face_locations(face)
        # If the training image contains no face or more than one face, print an error message and exit
        if len(face_bounding_boxes) != 1:
            print(person + "/" + person_img + " contains none or more than one face and can't be used for training.")
exit()
else:
face_enc = face_recognition.face_encodings(face)[0]
# Add face encoding for current image with corresponding label (name) to the training data
encodings.append(face_enc)
names.append(person)
# Create and train the SVC classifier
clf = svm.SVC(gamma='scale')
clf.fit(encodings,names)
# Load the test image with unknown faces into a numpy array
test_image = face_recognition.load_image_file('test_image.jpg')
# Find all the faces in the test image using the default HOG-based model
face_locations = face_recognition.face_locations(test_image)
no = len(face_locations)
print("Number of faces detected: ", no)
# Predict all the faces in the test image using the trained classifier
print("Found:")
for i in range(no):
test_image_enc = face_recognition.face_encodings(test_image)[i]
name = clf.predict([test_image_enc])
print(*name)
|
MetaTag.type.ts
|
export type MetaTag = {
name: string;
content: string;
};
|
DelSpace.go
|
package String
import (
"regexp"
"strings"
)
// DelSpace removes whitespace characters from a string.
// When all is true, every whitespace character is removed; when false, runs of consecutive whitespace are collapsed into a single space.
func (*String)DelSpace(str string, all bool) string {
if all && str != "" {
return strings.Join(strings.Fields(str), "")
} else if str != "" {
		// First collapse runs of two or more consecutive whitespace characters into a single space
		str = regexp.MustCompile(`[[:space:]]{2,}|[\s\p{Zs}]{2,}`).ReplaceAllString(str, " ")
		// Then convert the remaining whitespace characters ([\t\n\f\r] etc.) to spaces
str = regexp.MustCompile(`\s`).ReplaceAllString(str, " ")
}
return strings.TrimSpace(str)
}
|
index.js
|
"use strict";
if (process.env.NODE_ENV === 'development') {
const electronHot = require('electron-hot-loader');
electronHot.install({higherOrderFunctions: ['connect']});
electronHot.watchJsx(['src/**/*.jsx']);
electronHot.watchCss(['src/assets/**/*.css']);
}
require('./index.jsx');
|
healthcheckextension_test.go
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package healthcheckextension
import (
"context"
"net"
"net/http"
"runtime"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componenttest"
"go.opentelemetry.io/collector/config/confignet"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil"
)
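// ensureServerRunning returns a condition function for require.Eventually that
// reports whether a TCP connection to the given endpoint can be established.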
func ensureServerRunning(url string) func() bool {
return func() bool {
_, err := net.DialTimeout("tcp", url, 30*time.Second)
return err == nil
}
}
func TestHealthCheckExtensionUsageWithoutCheckCollectorPipeline(t *testing.T) {
config := Config{
TCPAddr: confignet.TCPAddr{
Endpoint: testutil.GetAvailableLocalAddress(t),
},
CheckCollectorPipeline: defaultCheckCollectorPipelineSettings(),
Path: "/",
}
hcExt := newServer(config, zap.NewNop())
require.NotNil(t, hcExt)
require.NoError(t, hcExt.Start(context.Background(), componenttest.NewNopHost()))
t.Cleanup(func() { require.NoError(t, hcExt.Shutdown(context.Background())) })
// Give a chance for the server goroutine to run.
runtime.Gosched()
client := &http.Client{}
url := "http://" + config.TCPAddr.Endpoint
resp0, err := client.Get(url)
require.NoError(t, err)
defer resp0.Body.Close()
require.Equal(t, http.StatusServiceUnavailable, resp0.StatusCode)
require.NoError(t, hcExt.Ready())
resp1, err := client.Get(url)
require.NoError(t, err)
defer resp1.Body.Close()
require.Equal(t, http.StatusOK, resp1.StatusCode)
require.NoError(t, hcExt.NotReady())
resp2, err := client.Get(url)
require.NoError(t, err)
defer resp2.Body.Close()
require.Equal(t, http.StatusServiceUnavailable, resp2.StatusCode)
}
func TestHealthCheckExtensionUsageWithCustomizedPathWithoutCheckCollectorPipeline(t *testing.T) {
config := Config{
TCPAddr: confignet.TCPAddr{
Endpoint: testutil.GetAvailableLocalAddress(t),
},
CheckCollectorPipeline: defaultCheckCollectorPipelineSettings(),
Path: "/health",
}
hcExt := newServer(config, zap.NewNop())
require.NotNil(t, hcExt)
require.NoError(t, hcExt.Start(context.Background(), componenttest.NewNopHost()))
t.Cleanup(func() { require.NoError(t, hcExt.Shutdown(context.Background())) })
require.Eventuallyf(t, ensureServerRunning(config.TCPAddr.Endpoint), 30*time.Second, 1*time.Second, "Failed to start the testing server.")
client := &http.Client{}
url := "http://" + config.TCPAddr.Endpoint + config.Path
resp0, err := client.Get(url)
require.NoError(t, err)
require.NoError(t, resp0.Body.Close(), "Must be able to close the response")
require.Equal(t, http.StatusServiceUnavailable, resp0.StatusCode)
require.NoError(t, hcExt.Ready())
resp1, err := client.Get(url)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp1.StatusCode)
require.NoError(t, resp1.Body.Close(), "Must be able to close the response")
require.NoError(t, hcExt.NotReady())
resp2, err := client.Get(url)
require.NoError(t, err)
require.Equal(t, http.StatusServiceUnavailable, resp2.StatusCode)
require.NoError(t, resp2.Body.Close(), "Must be able to close the response")
}
func TestHealthCheckExtensionUsageWithCheckCollectorPipeline(t *testing.T) {
config := Config{
TCPAddr: confignet.TCPAddr{
Endpoint: testutil.GetAvailableLocalAddress(t),
},
CheckCollectorPipeline: checkCollectorPipelineSettings{
Enabled: true,
Interval: "5m",
ExporterFailureThreshold: 1,
},
Path: "/",
}
hcExt := newServer(config, zap.NewNop())
require.NotNil(t, hcExt)
require.NoError(t, hcExt.Start(context.Background(), componenttest.NewNopHost()))
t.Cleanup(func() { require.NoError(t, hcExt.Shutdown(context.Background())) })
// Give a chance for the server goroutine to run.
runtime.Gosched()
newView := view.View{Name: exporterFailureView}
currentTime := time.Now()
vd1 := &view.Data{
View: &newView,
Start: currentTime.Add(-2 * time.Minute),
End: currentTime,
Rows: nil,
}
vd2 := &view.Data{
View: &newView,
Start: currentTime.Add(-1 * time.Minute),
End: currentTime,
Rows: nil,
}
client := &http.Client{}
url := "http://" + config.TCPAddr.Endpoint
resp0, err := client.Get(url)
require.NoError(t, err)
defer resp0.Body.Close()
hcExt.exporter.exporterFailureQueue = append(hcExt.exporter.exporterFailureQueue, vd1)
require.NoError(t, hcExt.Ready())
resp1, err := client.Get(url)
require.NoError(t, err)
defer resp1.Body.Close()
require.Equal(t, http.StatusOK, resp1.StatusCode)
require.NoError(t, hcExt.NotReady())
resp2, err := client.Get(url)
require.NoError(t, err)
defer resp2.Body.Close()
require.Equal(t, http.StatusInternalServerError, resp2.StatusCode)
hcExt.exporter.exporterFailureQueue = append(hcExt.exporter.exporterFailureQueue, vd2)
require.NoError(t, hcExt.Ready())
resp3, err := client.Get(url)
require.NoError(t, err)
defer resp3.Body.Close()
require.Equal(t, http.StatusInternalServerError, resp3.StatusCode)
}
func TestHealthCheckExtensionUsageWithCustomPathWithCheckCollectorPipeline(t *testing.T) {
config := Config{
TCPAddr: confignet.TCPAddr{
Endpoint: testutil.GetAvailableLocalAddress(t),
},
CheckCollectorPipeline: checkCollectorPipelineSettings{
Enabled: true,
Interval: "5m",
ExporterFailureThreshold: 1,
},
Path: "/health",
}
hcExt := newServer(config, zap.NewNop())
require.NotNil(t, hcExt)
require.NoError(t, hcExt.Start(context.Background(), componenttest.NewNopHost()))
t.Cleanup(func() { require.NoError(t, hcExt.Shutdown(context.Background())) })
// Give a chance for the server goroutine to run.
runtime.Gosched()
require.Eventuallyf(t, ensureServerRunning(config.TCPAddr.Endpoint), 30*time.Second, 1*time.Second, "Failed to start the testing server.")
newView := view.View{Name: exporterFailureView}
currentTime := time.Now()
vd1 := &view.Data{
View: &newView,
Start: currentTime.Add(-2 * time.Minute),
End: currentTime,
Rows: nil,
}
vd2 := &view.Data{
View: &newView,
Start: currentTime.Add(-1 * time.Minute),
End: currentTime,
Rows: nil,
}
client := &http.Client{}
url := "http://" + config.TCPAddr.Endpoint + config.Path
resp0, err := client.Get(url)
require.NoError(t, err)
require.NoError(t, resp0.Body.Close(), "Must be able to close the response")
hcExt.exporter.exporterFailureQueue = append(hcExt.exporter.exporterFailureQueue, vd1)
require.NoError(t, hcExt.Ready())
resp1, err := client.Get(url)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp1.StatusCode)
require.NoError(t, resp1.Body.Close(), "Must be able to close the response")
require.NoError(t, hcExt.NotReady())
resp2, err := client.Get(url)
require.NoError(t, err)
require.Equal(t, http.StatusInternalServerError, resp2.StatusCode)
require.NoError(t, resp2.Body.Close(), "Must be able to close the response")
hcExt.exporter.exporterFailureQueue = append(hcExt.exporter.exporterFailureQueue, vd2)
require.NoError(t, hcExt.Ready())
resp3, err := client.Get(url)
require.NoError(t, err)
require.Equal(t, http.StatusInternalServerError, resp3.StatusCode)
require.NoError(t, resp3.Body.Close(), "Must be able to close the response")
}
func TestHealthCheckExtensionPortAlreadyInUse(t *testing.T) {
endpoint := testutil.GetAvailableLocalAddress(t)
// This needs to be ":port" because health checks also tries to connect to ":port".
// To avoid the pop-up "accept incoming network connections" health check should be changed
// to accept an address.
ln, err := net.Listen("tcp", endpoint)
require.NoError(t, err)
defer ln.Close()
config := Config{
TCPAddr: confignet.TCPAddr{
Endpoint: endpoint,
},
CheckCollectorPipeline: defaultCheckCollectorPipelineSettings(),
}
hcExt := newServer(config, zap.NewNop())
require.NotNil(t, hcExt)
mh := newAssertNoErrorHost(t)
require.Error(t, hcExt.Start(context.Background(), mh))
}
func TestHealthCheckMultipleStarts(t *testing.T) {
config := Config{
TCPAddr: confignet.TCPAddr{
Endpoint: testutil.GetAvailableLocalAddress(t),
},
CheckCollectorPipeline: defaultCheckCollectorPipelineSettings(),
Path: "/",
}
hcExt := newServer(config, zap.NewNop())
require.NotNil(t, hcExt)
mh := newAssertNoErrorHost(t)
require.NoError(t, hcExt.Start(context.Background(), mh))
t.Cleanup(func() { require.NoError(t, hcExt.Shutdown(context.Background())) })
require.Error(t, hcExt.Start(context.Background(), mh))
}
func TestHealthCheckMultipleShutdowns(t *testing.T) {
	config := Config{
		TCPAddr: confignet.TCPAddr{
			Endpoint: testutil.GetAvailableLocalAddress(t),
		},
		CheckCollectorPipeline: defaultCheckCollectorPipelineSettings(),
		Path:                   "/",
	}
	hcExt := newServer(config, zap.NewNop())
	require.NotNil(t, hcExt)
	require.NoError(t, hcExt.Start(context.Background(), componenttest.NewNopHost()))
	require.NoError(t, hcExt.Shutdown(context.Background()))
	require.NoError(t, hcExt.Shutdown(context.Background()))
}
func TestHealthCheckShutdownWithoutStart(t *testing.T) {
config := Config{
TCPAddr: confignet.TCPAddr{
Endpoint: testutil.GetAvailableLocalAddress(t),
},
CheckCollectorPipeline: defaultCheckCollectorPipelineSettings(),
}
hcExt := newServer(config, zap.NewNop())
require.NotNil(t, hcExt)
require.NoError(t, hcExt.Shutdown(context.Background()))
}
// assertNoErrorHost implements a component.Host that asserts that there were no errors.
type assertNoErrorHost struct {
component.Host
*testing.T
}
// newAssertNoErrorHost returns a new instance of assertNoErrorHost.
func newAssertNoErrorHost(t *testing.T) component.Host {
return &assertNoErrorHost{
Host: componenttest.NewNopHost(),
T: t,
}
}
func (aneh *assertNoErrorHost) ReportFatalError(err error) {
assert.NoError(aneh, err)
}
|
mod.rs
|
pub use self::fdir::*;
pub use self::phy_port::*;
pub use self::virt_port::*;
use allocators::*;
use common::*;
use interface::{PacketRx, PacketTx};
use native::zcsi::MBuf;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
pub mod fdir;
mod phy_port;
mod virt_port;
/// Statistics for PMD port.
pub struct PortStats {
pub stats: AtomicUsize,
pub queued: AtomicUsize,
pub q_len: AtomicUsize,
pub max_q_len: AtomicUsize,
pub cycles: AtomicU64,
}
impl PortStats {
    pub fn new() -> CacheAligned<PortStats> {
        // Virtual ports often do not support reading the queue length;
        // for those we need to initialize with a q_len > 0, e.g. 1.
CacheAligned::allocate(PortStats {
stats: AtomicUsize::new(0),
queued: AtomicUsize::new(0),
q_len: AtomicUsize::new(1),
max_q_len: AtomicUsize::new(1),
cycles: AtomicU64::new(0),
})
}
pub fn get_q_len(&self) -> usize {
self.q_len.load(Ordering::Relaxed)
}
pub fn get_max_q_len(&self) -> usize {
self.max_q_len.load(Ordering::Relaxed)
}
pub fn cycles(&self) -> u64 {
self.cycles.load(Ordering::Relaxed)
}
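    /// Records the current queue length, updates the high-water mark if needed,
    /// and returns the previous length.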
pub fn set_q_len(&self, len: usize) -> usize {
let q_max = self.get_max_q_len();
if len > q_max {
self.max_q_len.store(len, Ordering::Relaxed);
}
self.q_len.swap(len, Ordering::Relaxed)
}
}
impl<T: PacketRx> PacketRx for CacheAligned<T> {
#[inline]
fn recv(&self, pkts: &mut [*mut MBuf]) -> errors::Result<(u32, i32)> {
T::recv(&*self, pkts)
}
#[inline]
fn queued(&self) -> usize {
T::queued(&self)
}
}
impl<T: PacketTx> PacketTx for CacheAligned<T> {
#[inline]
fn send(&mut self, pkts: &mut [*mut MBuf]) -> errors::Result<u32> {
T::send(&mut *self, pkts)
}
}
|
global.ts
|
import { GMApi } from 'greasemonkey';
/**
* The global data which are stored in the browser.
*/
export class Global {
/**
* Github access token.
*/
public static token: string = '';
}
/**
* Storage flag.
*/
export enum StorageFlag {
Token = 'github-token',
}
/**
* Store a Global field.
* @param flag storage flag
* @param value data
* @param callback set value callback
*/
export function setStorage(flag: StorageFlag, value: any, callback?: () => void) {
GMApi.GM_setValue(flag.toString(), value);
if (callback) {
callback();
}
}
/**
* Read all fields into Global object.
* @param callback get value callback
*/
export function readStorage(callback: () => void) {
Global.token = GMApi.GM_getValue(StorageFlag.Token.toString());
callback();
}
/**
* Get a global field from storage.
* @param flag storage flag
* @param callback get value callback
*/
export function getStorage(flag: StorageFlag, callback: (item: any) => void) {
callback(GMApi.GM_getValue(flag.toString()));
}
/**
* Remove a field from storage.
* @param flag storage flag
* @param callback delete value callback
*/
export function removeStorage(flag: StorageFlag, callback?: () => void) {
GMApi.GM_deleteValue(flag.toString());
if (callback) {
callback();
}
}
/**
* Callback invoked when config clicked.
*/
export function onConfigClicked() {
getStorage(StorageFlag.Token, data => {
const token: string = data;
if (!token) {
if (confirm('Do you want to add a token to access the private repos?')) {
addToken();
} else {
alert('You can click the config button to reopen this dialog.');
}
} else {
removeToken(token);
}
});
}
/**
 * Prompt the user for a Github token and store it.
*/
function addToken() {
const token = prompt('Please enter your Github token: \n(to get token, please visit https://github.com/settings/tokens)');
if (token === null) return;
if (token.trim().length === 0) {
alert('You have entered an empty token.');
} else {
setStorage(StorageFlag.Token, token, () => {
alert('Your Github token has been set successfully, reload this page to see changes.');
});
}
}
/**
 * Confirm with the user and remove the stored Github token.
*/
function removeToken(token: string) {
const ok = confirm(`You have already set your Github token (${token}), want to remove it?`);
if (ok) {
removeStorage(StorageFlag.Token, () => {
alert('You have successfully removed Github token.');
});
}
}
|
youtube_dl_client.py
|
from __future__ import unicode_literals
import youtube_dl
class YoutubeDLClient():
"""docstring for YoutubeDLClient"""
def __init__(self, ydl_opts):
ydl_opts.update({'logger': self.MyLogger()})
ydl_opts.update({'progress_hooks': [self.my_hook]})
self.ydl_opts = ydl_opts
self.ydl = youtube_dl.YoutubeDL(ydl_opts)
class MyLogger(object):
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
print(msg)
def my_hook(self, d):
print(d['status'])
if d['status'] == 'finished':
print('Done downloading, now converting ...')
def download_video(self, video_id):
"""
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
            }],
            #'logger': MyLogger(),
#'progress_hooks': [my_hook],
}
"""
#ydl_opts = {}
#https://www.youtube.com/embed/chElHV99xak?start=53&end=59
youtube_url = 'https://www.youtube.com/watch?v='
self.ydl.download([youtube_url+video_id])
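
# A minimal usage sketch (hypothetical values): the options mirror the docstring
# above, and 'chElHV99xak' is just a placeholder video id.
#
#   client = YoutubeDLClient({
#       'format': 'bestaudio/best',
#       'postprocessors': [{
#           'key': 'FFmpegExtractAudio',
#           'preferredcodec': 'mp3',
#           'preferredquality': '192',
#       }],
#   })
#   client.download_video('chElHV99xak')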
|
whicherrs.go
|
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
package oracle
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"sort"
"github.com/scalingdata/go-x-tools/go/ast/astutil"
"github.com/scalingdata/go-x-tools/go/loader"
"github.com/scalingdata/go-x-tools/go/ssa"
"github.com/scalingdata/go-x-tools/go/ssa/ssautil"
"github.com/scalingdata/go-x-tools/oracle/serial"
)
var builtinErrorType = types.Universe.Lookup("error").Type()
// whicherrs takes a position of an error value and tries to find all types,
// constants and global values which that error can point to and which can be
// checked from the scope where the error lives.
// In short, it returns a list of things that can be checked against in order to handle
// an error properly.
//
// TODO(dmorsing): figure out if fields in errors like *os.PathError.Err
// can be queried recursively somehow.
func whicherrs(q *Query) error {
lconf := loader.Config{Build: q.Build}
if err := setPTAScope(&lconf, q.Scope); err != nil {
return err
}
// Load/parse/type-check the program.
lprog, err := lconf.Load()
if err != nil {
return err
}
q.Fset = lprog.Fset
qpos, err := parseQueryPos(lprog, q.Pos, true) // needs exact pos
if err != nil {
return err
}
prog := ssautil.CreateProgram(lprog, ssa.GlobalDebug)
ptaConfig, err := setupPTA(prog, lprog, q.PTALog, q.Reflection)
if err != nil {
return err
}
path, action := findInterestingNode(qpos.info, qpos.path)
if action != actionExpr {
return fmt.Errorf("whicherrs wants an expression; got %s",
astutil.NodeDescription(qpos.path[0]))
}
var expr ast.Expr
var obj types.Object
switch n := path[0].(type) {
case *ast.ValueSpec:
// ambiguous ValueSpec containing multiple names
return fmt.Errorf("multiple value specification")
case *ast.Ident:
obj = qpos.info.ObjectOf(n)
expr = n
case ast.Expr:
expr = n
default:
return fmt.Errorf("unexpected AST for expr: %T", n)
}
typ := qpos.info.TypeOf(expr)
if !types.Identical(typ, builtinErrorType) {
return fmt.Errorf("selection is not an expression of type 'error'")
}
// Determine the ssa.Value for the expression.
var value ssa.Value
if obj != nil {
// def/ref of func/var object
value, _, err = ssaValueForIdent(prog, qpos.info, obj, path)
} else {
value, _, err = ssaValueForExpr(prog, qpos.info, path)
}
if err != nil {
return err // e.g. trivially dead code
}
// Defer SSA construction till after errors are reported.
prog.Build()
globals := findVisibleErrs(prog, qpos)
constants := findVisibleConsts(prog, qpos)
res := &whicherrsResult{
qpos: qpos,
errpos: expr.Pos(),
}
// TODO(adonovan): the following code is heavily duplicated
// w.r.t. "pointsto". Refactor?
// Find the instruction which initialized the
// global error. If more than one instruction has stored to the global
// remove the global from the set of values that we want to query.
allFuncs := ssautil.AllFunctions(prog)
for fn := range allFuncs {
for _, b := range fn.Blocks {
for _, instr := range b.Instrs {
store, ok := instr.(*ssa.Store)
if !ok {
continue
}
gval, ok := store.Addr.(*ssa.Global)
if !ok {
continue
}
gbl, ok := globals[gval]
if !ok {
continue
}
// we already found a store to this global
// The normal error define is just one store in the init
// so we just remove this global from the set we want to query
if gbl != nil {
delete(globals, gval)
}
globals[gval] = store.Val
}
}
}
ptaConfig.AddQuery(value)
for _, v := range globals {
ptaConfig.AddQuery(v)
}
ptares := ptrAnalysis(ptaConfig)
valueptr := ptares.Queries[value]
for g, v := range globals {
ptr, ok := ptares.Queries[v]
if !ok {
continue
}
if !ptr.MayAlias(valueptr) {
continue
}
res.globals = append(res.globals, g)
}
pts := valueptr.PointsTo()
dedup := make(map[*ssa.NamedConst]bool)
for _, label := range pts.Labels() {
// These values are either MakeInterfaces or reflect
// generated interfaces. For the purposes of this
// analysis, we don't care about reflect generated ones
makeiface, ok := label.Value().(*ssa.MakeInterface)
if !ok {
continue
}
constval, ok := makeiface.X.(*ssa.Const)
if !ok {
continue
}
c := constants[*constval]
if c != nil && !dedup[c] {
dedup[c] = true
res.consts = append(res.consts, c)
}
}
concs := pts.DynamicTypes()
concs.Iterate(func(conc types.Type, _ interface{}) {
// go/types is a bit annoying here.
// We want to find all the types that we can
// typeswitch or assert to. This means finding out
// if the type pointed to can be seen by us.
//
// For the purposes of this analysis, the type is always
// either a Named type or a pointer to one.
// There are cases where error can be implemented
// by unnamed types, but in that case, we can't assert to
// it, so we don't care about it for this analysis.
var name *types.TypeName
switch t := conc.(type) {
case *types.Pointer:
named, ok := t.Elem().(*types.Named)
if !ok {
return
}
name = named.Obj()
case *types.Named:
name = t.Obj()
default:
return
}
if !isAccessibleFrom(name, qpos.info.Pkg) {
return
}
res.types = append(res.types, &errorType{conc, name})
})
sort.Sort(membersByPosAndString(res.globals))
sort.Sort(membersByPosAndString(res.consts))
sort.Sort(sorterrorType(res.types))
q.result = res
return nil
}
// findVisibleErrs returns a mapping from each package-level variable of type "error" to nil.
func findVisibleErrs(prog *ssa.Program, qpos *queryPos) map[*ssa.Global]ssa.Value {
globals := make(map[*ssa.Global]ssa.Value)
for _, pkg := range prog.AllPackages() {
for _, mem := range pkg.Members {
gbl, ok := mem.(*ssa.Global)
if !ok {
continue
}
gbltype := gbl.Type()
// globals are always pointers
if !types.Identical(deref(gbltype), builtinErrorType) {
continue
}
if !isAccessibleFrom(gbl.Object(), qpos.info.Pkg) {
continue
}
globals[gbl] = nil
}
}
return globals
}
// findVisibleConsts returns a mapping from each package-level constant assignable to type "error", to nil.
func findVisibleConsts(prog *ssa.Program, qpos *queryPos) map[ssa.Const]*ssa.NamedConst {
constants := make(map[ssa.Const]*ssa.NamedConst)
for _, pkg := range prog.AllPackages() {
for _, mem := range pkg.Members {
obj, ok := mem.(*ssa.NamedConst)
if !ok {
continue
}
consttype := obj.Type()
if !types.AssignableTo(consttype, builtinErrorType) {
continue
}
if !isAccessibleFrom(obj.Object(), qpos.info.Pkg) {
continue
}
constants[*obj.Value] = obj
}
}
return constants
}
type membersByPosAndString []ssa.Member
func (a membersByPosAndString) Len() int { return len(a) }
func (a membersByPosAndString) Less(i, j int) bool {
cmp := a[i].Pos() - a[j].Pos()
return cmp < 0 || cmp == 0 && a[i].String() < a[j].String()
}
func (a membersByPosAndString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type sorterrorType []*errorType
func (a sorterrorType) Len() int { return len(a) }
func (a sorterrorType) Less(i, j int) bool {
cmp := a[i].obj.Pos() - a[j].obj.Pos()
return cmp < 0 || cmp == 0 && a[i].typ.String() < a[j].typ.String()
}
func (a sorterrorType) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type errorType struct {
typ types.Type // concrete type N or *N that implements error
obj *types.TypeName // the named type N
}
type whicherrsResult struct {
qpos *queryPos
errpos token.Pos
globals []ssa.Member
consts []ssa.Member
types []*errorType
}
func (r *whicherrsResult) display(printf printfFunc) {
if len(r.globals) > 0 {
printf(r.qpos, "this error may point to these globals:")
for _, g := range r.globals {
printf(g.Pos(), "\t%s", g.RelString(r.qpos.info.Pkg))
}
}
if len(r.consts) > 0 {
printf(r.qpos, "this error may contain these constants:")
for _, c := range r.consts {
printf(c.Pos(), "\t%s", c.RelString(r.qpos.info.Pkg))
}
}
if len(r.types) > 0 {
printf(r.qpos, "this error may contain these dynamic types:")
for _, t := range r.types {
printf(t.obj.Pos(), "\t%s", r.qpos.typeString(t.typ))
}
}
}
func (r *whicherrsResult) toSerial(res *serial.Result, fset *token.FileSet) {
we := &serial.WhichErrs{}
we.ErrPos = fset.Position(r.errpos).String()
for _, g := range r.globals {
we.Globals = append(we.Globals, fset.Position(g.Pos()).String())
}
for _, c := range r.consts {
we.Constants = append(we.Constants, fset.Position(c.Pos()).String())
}
for _, t := range r.types {
var et serial.WhichErrsType
et.Type = r.qpos.typeString(t.typ)
et.Position = fset.Position(t.obj.Pos()).String()
we.Types = append(we.Types, et)
}
res.WhichErrs = we
}
|
diffcheck.py
|
#!/usr/bin/env python
import argparse
from datetime import date
import hashlib
import logging
import sys
import textwrap
from classes.resource import Resource
from classes.dbmanager import ResourceStorage
from classes.reporter import HtmlReport
import helpers
def get_reports_path(path):
today = date.today()
return "{0}/{1}/{2}/".format(path, today.month, today.day)
def check_differences(resources, report):
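    """Fetch every resource, compare its SHA-256 digest against the stored
    hash, and report/update/return the resources whose content changed."""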
    report.add_urls(resources)
    changed_resources = []
    for resource in resources:
        actual_content = helpers.fetch_resource(resource.url)
        if actual_content:
            if hashlib.sha256(actual_content).hexdigest() != resource.content.hash:
                report.add(resource, actual_content)
                resource.update(actual_content)
                changed_resources.append(resource)
    report.save()
    return changed_resources

if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="diffcheck.py",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
Resource Difference Checker
See https://github.com/bayotop/resdiffcheck for more information.
"""))
parser.add_argument("db", help="database with resources to check")
parser.add_argument("report_dir", help="target directory for reports (without trailing /)")
parser.add_argument("-l", "--logfile", default="process.log", help="default ./process.log")
args = parser.parse_args()
    logging.basicConfig(filename=args.logfile, level=logging.DEBUG)
storage = ResourceStorage(args.db)
if not storage.load():
sys.exit()
report = HtmlReport(get_reports_path(args.report_dir), "diff.html")
changed_resources = check_differences(storage.getall(), report)
if changed_resources:
storage.add_multiple(changed_resources)
|
errors.ts
|
import { LexerTokenList } from "./lexer/types";
export class IMAPError extends Error {
public source?: string;
public readonly wrappedError?: Error;
constructor(msg: string);
constructor(wrappedError: Error);
constructor(msg: string, wrappedError: Error);
constructor(msgOrErr: string | Error, wrappedError?: Error) {
super(typeof msgOrErr === "string" ? msgOrErr : msgOrErr.message);
if (wrappedError) {
this.wrappedError = wrappedError;
} else if (typeof msgOrErr !== "string") {
this.wrappedError = msgOrErr;
}
}
}
export class TokenizationError extends Error {
constructor(message: string, public readonly input: string) {
super(message);
}
toString(): string {
return [this.message, `\tInput: ${this.input}`].join("\n");
}
}
export class ParsingError extends Error {
constructor(
message: string,
public readonly input?: string | LexerTokenList,
) {
super(message);
}
toString(): string {
let inputStr: string;
if (Array.isArray(this.input)) {
inputStr = "";
this.input.forEach((i) => (inputStr += i.value));
} else {
inputStr = this.input;
}
return [this.message, inputStr].join("\n");
}
}
export class InvalidParsedDataError extends Error {
	constructor(
		public readonly expected: string[],
public readonly actual: string | string[],
) {
super("Invalid parsed data");
}
toString(): string {
return [
this.message,
`\tExpected: [${this.expected}]`,
`\tActual: ${
typeof this.actual === "string"
? this.actual
: `[${this.actual}]`
}`,
].join("\n");
}
}
export class NotImplementedError extends Error {
constructor(what: string) {
super(
`"${what}" has not been implemented or is not available in the current context`,
);
}
}
|
ioredis-probe.js
|
/*
Copyright (c) AppDynamics, Inc., and its affiliates
2015
All Rights Reserved
*/
'use strict';
var uuid = require('uuid');
function IoredisProbe(agent) {
this.agent = agent;
this.packages = ['ioredis'];
this.callbackQueue = {};
}
exports.IoredisProbe = IoredisProbe;
/*
 * The 'sendCommand' function is probed for all the redis command calls.
 * In v3+ of the redis driver, the callback passed to a redis command call
 * is wrapped in a promise. The callback is wrapped by value, so the
 * 'callback' function received in the arguments of 'sendCommand' is a
 * different function object.
 * Here we capture the callback passed to the redis command calls and place
 * the probes around it. This probed callback is then wrapped in a promise
 * by the redis utilities. We also attach a unique id to the probed callback
 * so the 'sendCommand' probes can put the exit-call details for the redis
 * call in 'callbackQueue'. When the callback is invoked, the exit-call
 * details are fetched from 'callbackQueue' and used to complete the exit
 * call.
 */
IoredisProbe.prototype.attach = function(obj) {
var self = this;
if(obj.__appdynamicsProbeAttached__) return;
obj.__appdynamicsProbeAttached__ = true;
self.agent.on('destroy', function() {
if(obj.__appdynamicsProbeAttached__) {
delete obj.__appdynamicsProbeAttached__;
proxy.release(obj.Cluster.prototype.sendCommand);
proxy.release(obj.prototype.sendCommand);
}
});
var proxy = self.agent.proxy;
var profiler = self.agent.profiler;
var clusters = {};
proxy.before(obj.Cluster.prototype, "sendCommand", function(obj) {
var serverPool = [];
if(Array.isArray(obj.startupNodes)) {
obj.startupNodes.forEach(function(node) {
var address = node.host + ':' + node.port;
serverPool.push(address);
clusters[address] = serverPool;
});
}
});
var builtInCommandsList = obj.prototype.getBuiltinCommands();
builtInCommandsList.forEach(function(command) {
proxy.before(obj.prototype, command, function(obj, args) {
var uniqueIdForCb = uuid.v4();
var callbackHooked = proxy.callback(args, -1, function(obj, args) {
if (self.callbackQueue[uniqueIdForCb]) {
complete(args[0], self.callbackQueue[uniqueIdForCb].locals);
delete self.callbackQueue[uniqueIdForCb];
}
}, null, self.agent.thread.current());
if (callbackHooked)
args[args.length - 1].__appdCbId = uniqueIdForCb;
});
});
proxy.around(obj.prototype, "sendCommand", function(obj, args, locals) {
var redis = obj;
var command = args[0];
var commandName = command.name;
var commandArgs = command.args;
var address = redis.options.host + ':' + redis.options.port;
locals.time = profiler.time();
var serverPool = clusters[address];
if(serverPool) {
address = serverPool.join('\n');
}
var supportedProperties = {
'SERVER POOL': address,
'VENDOR': 'REDIS'
};
locals.exitCall = profiler.createExitCall(locals.time, {
exitType: 'EXIT_CACHE',
supportedProperties: supportedProperties,
command: commandName,
commandArgs: profiler.sanitize(commandArgs),
stackTrace: profiler.stackTrace()
});
if (!locals.exitCall) return;
if(command.callback && typeof(command.callback) === 'function') {
locals.methodHasCb = true;
if (command.callback.__appdCbId) {
self.callbackQueue[command.callback.__appdCbId] = {
args: args,
locals: locals
};
if (process.env.NODE_ENV === 'appd_test')
locals.exitCall.__appdCbId = command.callback.__appdCbId;
}
}
}, after, false, self.agent.thread.current());
function after(obj,args, ret, locals) {
if (locals.methodHasCb)
return;
if (!ret || !ret.__appdynamicsIsPromiseResult__)
complete(null, locals);
else if (ret.error)
complete(ret.error, locals);
else
complete(null, locals);
}
    function complete(err, locals) {
if (!locals.exitCall) return;
if (!locals.time.done()) return;
var error = self.agent.proxy.getErrorObject(err);
profiler.addExitCall(locals.time, locals.exitCall, error);
}
};
|
strings.f.Repeat.go
|
/************************************************************************************
**Author: Axe Tang; Email: [email protected]
**Package: strings
**Element: strings.Repeat
**Type: func
------------------------------------------------------------------------------------
**Definition:
func Repeat(s string, count int) string
------------------------------------------------------------------------------------
**Description:
Repeat returns a new string consisting of count copies of the string s.
It panics if count is negative or if the result of (len(s) * count) overflows.
------------------------------------------------------------------------------------
**Key points:
1. Repeat returns a new string consisting of count copies of the string s;
2. If count is negative, or len(s)*count overflows, the function panics.
*************************************************************************************/
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println("ba" + strings.Repeat("na", 2))
}
|
generateNonCodeFiles.ts
|
/*!
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
import * as child_process from 'child_process'
import * as fs from 'fs-extra'
import * as marked from 'marked'
import * as path from 'path'
// doesn't use path utils as this should be formatted for finding images with HTML markup
const REPO_ROOT = path.dirname(__dirname)
/**
* replaces relative paths with an `!!EXTENSIONROOT!!` token.
* This makes it easier to swap in relative links when the extension loads.
* @param root Repository root
* @param inputFile Input .md file to swap to HTML
* @param outputFile Filepath to output HTML to
* @param cn Converts "AWS" to "Amazon" for CN-based compute.
*/
function translateReadmeToHtml(root: string, inputFile: string, outputFile: string, cn: boolean = false) {
const fileText = fs.readFileSync(path.join(root, inputFile)).toString()
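    // Matches the tail of a markdown link that points at a repo-relative path, i.e. the `](./` sequence.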
const relativePathRegex = /]\(\.\//g
let transformedText = fileText.replace(relativePathRegex, '](!!EXTENSIONROOT!!/')
if (cn) {
transformedText = transformedText.replace(/AWS/g, 'Amazon').replace(/-en.png/g, '-cn.png')
}
marked(transformedText, (err, result) => {
fs.writeFileSync(path.join(root, outputFile), result)
})
}
/**
* Do a best effort job of generating a git hash and putting it into the package
*/
function generateFileHash(root: string) {
try {
const response = child_process.execSync('git rev-parse HEAD')
        fs.outputFileSync(path.join(root, '.gitcommit'), response)
} catch (e) {
console.log(`Getting commit hash failed ${e}`)
}
}
translateReadmeToHtml(REPO_ROOT, 'README.quickstart.vscode.md', 'quickStartVscode.html')
translateReadmeToHtml(REPO_ROOT, 'README.quickstart.cloud9.md', 'quickStartCloud9.html')
translateReadmeToHtml(REPO_ROOT, 'README.quickstart.cloud9.md', 'quickStartCloud9-cn.html', true)
generateFileHash(REPO_ROOT)
|
cats_api.py
|
import json
import re
import requests
from django.conf import settings
from cathie.exceptions import CatsAnswerCodeException
from cathie import authorization
def cats_check_status():
pass
@authorization.check_authorization_for_cats
def cats_submit_solution(source_text: str, problem_id: int, de_id: int, source=None):
    # TODO: handle re-submission of a solution
url = f'{settings.CATS_URL}main.pl?f=api_submit_problem;json=1;'
url += f'sid={authorization.cats_sid()}'
data = {
'de_id': de_id,
'source_text': source_text,
'problem_id': problem_id
}
r = requests.post(url, data=data)
if r.status_code != 200:
raise CatsAnswerCodeException(r)
r_content = json.loads(r.content.decode('utf-8'))
req_ids = None
if r_content.get('href_run_details'):
req_ids = re.search(r'(?<=rid=)\d+', r_content['href_run_details']).group()
if req_ids.isdigit():
req_ids = int(req_ids)
return req_ids, r_content
def cats_submit_problem():
pass
@authorization.check_authorization_for_cats
def cats_check_solution_status(req_ids: int):
url = f'{settings.CATS_URL}main.pl?f=api_get_request_state;req_ids={req_ids};json=1;'
url += f'sid={authorization.cats_sid()}'
r = requests.get(url)
if r.status_code != 200:
raise CatsAnswerCodeException(r)
data = r.json()
if data:
return data[0]['verdict'], data
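
# Typical flow (sketch; the ids below are placeholders):
#   req_ids, _ = cats_submit_solution(source_text, problem_id=42, de_id=3)
#   verdict, details = cats_check_solution_status(req_ids)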
@authorization.check_authorization_for_cats
def cats_get_problems_from_contest(contest_id):
url = f'{settings.CATS_URL}?f=problems;json=1;cid={contest_id};'
url += f'sid={authorization.cats_sid()}'
answer = requests.get(url)
if answer.status_code != 200:
raise CatsAnswerCodeException(answer)
data = json.loads(answer.content.decode('utf-8'))
# course_problems = CatsProblemSerializer(data=data.problems, many=True)
return data['problems']
def cats_get_problem_description_by_url(description_url):
url = f'{settings.CATS_URL}{description_url.lstrip("./")}'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/89.0.4356.6 Safari/537.36'
}
request = requests.request(method='get', url=url, headers=headers)
if request.status_code != 200:
raise CatsAnswerCodeException(request)
data = request.content.decode('utf-8')
return data
# def cats_get_problem_by_id(cats_id, user):
# pass
|
A.py
|
n = int(input())
if n == 7 or n == 5 or n == 3:
print("YES")
else:
print("NO")
|
environment.ts
|
// The file contents for the current environment will overwrite these during build.
// The build system defaults to the dev environment which uses `environment.ts`, but if you do
// `ng build --env=prod` then `environment.prod.ts` will be used instead.
// The list of which env maps to which file can be found in `.angular-cli.json`.
export const environment = {
production: false,
  baseUrl: 'http://localhost:4000',
apiUrl: 'http://localhost:4000/api/v1'
};
|
component.py
|
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, NamedTuple, Optional
def run_diagnose_me(
bucket: str,
execution_mode: str,
project_id: str,
target_apis: str,
quota_check: Optional[List[Any]] = None,
) -> NamedTuple('Outputs', [('bucket', str), ('project_id', str)]):
""" Performs environment verification specific to this pipeline.
    Args:
bucket:
string name of the bucket to be checked. Must be of the format
gs://bucket_root/any/path/here/is/ignored where any path beyond root
is ignored.
        execution_mode:
            If set to HALT_ON_ERROR, any error will cause an exception to be
            raised. This is intended to stop the data processing of a
            pipeline. Use any other value to only report errors/warnings.
project_id:
GCP project ID which is assumed to be the project under which
current pod is executing.
target_apis:
String consisting of a comma separated list of apis to be verified.
quota_check:
List of entries describing how much quota is required. Each entry
has three fields: region, metric and quota_needed. All
string-typed.
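            For example: [{'region': 'us-central1', 'metric': 'CPUS',
            'quota_needed': '24'}] (illustrative values only).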
Raises:
RuntimeError: If configuration is not setup properly and
HALT_ON_ERROR flag is set.
"""
# Installing pip3 and kfp, since the base image 'google/cloud-sdk:279.0.0'
# does not come with pip3 pre-installed.
import subprocess
subprocess.run([
'curl', 'https://bootstrap.pypa.io/get-pip.py', '-o', 'get-pip.py'
],
capture_output=True)
subprocess.run(['apt-get', 'install', 'python3-distutils', '--yes'],
capture_output=True)
subprocess.run(['python3', 'get-pip.py'], capture_output=True)
subprocess.run(['python3', '-m', 'pip', 'install', 'kfp>=0.1.31', '--quiet'],
capture_output=True)
import sys
from kfp.cli.diagnose_me import gcp
config_error_observed = False
quota_list = gcp.get_gcp_configuration(
gcp.Commands.GET_QUOTAS, human_readable=False
)
if quota_list.has_error:
print('Failed to retrieve project quota with error %s\n' % (quota_list.stderr))
config_error_observed = True
else:
# Check quota.
quota_dict = {} # Mapping from region to dict[metric, available]
for region_quota in quota_list:
quota_dict[region_quota['name']] = {}
for quota in region_quota['quotas']:
quota_dict[region_quota['name']][quota['metric']
] = quota['limit'] - quota['usage']
    quota_check = quota_check or []
for single_check in quota_check:
if single_check['region'] not in quota_dict:
print(
'Regional quota for %s does not exist in current project.\n' %
(single_check['region'])
)
config_error_observed = True
else:
if quota_dict[single_check['region']][single_check['metric']
] < single_check['quota_needed']:
print(
'Insufficient quota observed for %s at %s: %s is needed but only %s is available.\n'
% (
single_check['metric'], single_check['region'],
str(single_check['quota_needed']
), str(quota_dict[single_check['region']][single_check['metric']])
)
)
config_error_observed = True
# Get the project ID
# from project configuration
project_config = gcp.get_gcp_configuration(
gcp.Commands.GET_GCLOUD_DEFAULT, human_readable=False
)
if not project_config.has_error:
auth_project_id = project_config.parsed_output['core']['project']
print(
'GCP credentials are configured with access to project: %s ...\n' %
(project_id)
)
print('Following account(s) are active under this pipeline:\n')
subprocess.run(['gcloud', 'auth', 'list', '--format', 'json'])
print('\n')
else:
print(
'Project configuration is not accessible with error %s\n' %
(project_config.stderr),
file=sys.stderr
)
config_error_observed = True
    if not project_config.has_error and auth_project_id != project_id:
print(
'User provided project ID %s does not match the configuration %s\n' %
(project_id, auth_project_id),
file=sys.stderr
)
config_error_observed = True
# Get project buckets
get_project_bucket_results = gcp.get_gcp_configuration(
gcp.Commands.GET_STORAGE_BUCKETS, human_readable=False
)
if get_project_bucket_results.has_error:
print(
'could not retrieve project buckets with error: %s' %
(get_project_bucket_results.stderr),
file=sys.stderr
)
config_error_observed = True
# Get the root of the user provided bucket i.e. gs://root.
bucket_root = '/'.join(bucket.split('/')[0:3])
print(
'Checking to see if the provided GCS bucket\n %s\nis accessible ...\n' %
(bucket)
)
if bucket_root in get_project_bucket_results.json_output:
print(
'Provided bucket \n %s\nis accessible within the project\n %s\n' %
(bucket, project_id)
)
else:
print(
            'Could not find the bucket %s in project %s. ' % (bucket, project_id) +
'Please verify that you have provided the correct GCS bucket name.\n' +
'Only the following buckets are visible in this project:\n%s' %
(get_project_bucket_results.parsed_output),
file=sys.stderr
)
config_error_observed = True
# Verify APIs that are required are enabled
api_config_results = gcp.get_gcp_configuration(gcp.Commands.GET_APIS)
api_status = {}
if api_config_results.has_error:
print(
'could not retrieve API status with error: %s' %
(api_config_results.stderr),
file=sys.stderr
)
config_error_observed = True
print('Checking APIs status ...')
for item in api_config_results.parsed_output:
api_status[item['config']['name']] = item['state']
# printing the results in stdout for logging purposes
print('%s %s' % (item['config']['name'], item['state']))
# Check if target apis are enabled
api_check_results = True
for api in target_apis.replace(' ', '').split(','):
if 'ENABLED' != api_status.get(api, 'DISABLED'):
api_check_results = False
print(
'API \"%s\" is not accessible or not enabled. To enable this api go to '
% (api) +
'https://console.cloud.google.com/apis/library/%s?project=%s' %
(api, project_id),
file=sys.stderr
)
config_error_observed = True
if 'HALT_ON_ERROR' in execution_mode and config_error_observed:
raise RuntimeError(
'There was an error in your environment configuration.\n' +
'Note that resolving such issues generally require a deep knowledge of Kubernetes.\n'
+ '\n' +
'We highly recommend that you recreate the cluster and check "Allow access ..." \n'
+
'checkbox during cluster creation to have the cluster configured automatically.\n'
+
'For more information on this and other troubleshooting instructions refer to\n'
+ 'our troubleshooting guide.\n' + '\n' +
'If you have intentionally modified the cluster configuration, you may\n'
+
'bypass this error by removing the execution_mode HALT_ON_ERROR flag.\n'
)
    return (bucket, project_id)
if __name__ == '__main__':
import kfp.components as comp
comp.func_to_container_op(
run_diagnose_me,
base_image='google/cloud-sdk:279.0.0',
output_component_file='component.yaml',
)
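# A minimal usage sketch (not part of the original component; the file name
# 'component.yaml' comes from the call above, the rest is an assumption):
#
#     diagnose_me_op = comp.load_component_from_file('component.yaml')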
|
biz.js
|
define([],function(){var e={user:{}};return e.user.browser=function(e,r,n){var o="visible";n&&n.mode&&(o=n.mode);var a="0";$.isArray(e)&&0<e.length&&(a=e.join()),$("#user-browser-dialog")[0]&&$("#user-browser-dialog").remove();var i="./index.php?c=utility&a=user&do=browser&callback=aMember&mode="+o+"&uids="+a,t=util.dialog("Please select a user","Loading data......",'<button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button><button type="button" class="btn btn-primary" style="display: none;">Confirm</button>',{containerName:"user-browser-dialog"});t.modal("show"),t.on("shown.bs.modal",function(){window.aMember.pIndex=1,window.aMember.query()}),t.find(".modal-footer .btn-primary").click(function(){var e=[],n=$(".user-browser .btn-primary");0<n.length&&(n.each(function(){e.push($(this).attr("js-uid"))}),$.isFunction(r)&&(r(e),t.modal("hide")))}),window.aMember={pIndex:1,query:function(){var e={keyword:$("#keyword").val(),page:aMember.pIndex,callback:"aMember",mode:o,uids:a};$.post(i,e,function(e){e.message&&0!=e.message.error&&(e=e.message.message),t.find(".modal-body").html(e),n.direct&&t.find(".js-btn-select").click(function(){t.find(".modal-footer .btn-primary").trigger("click")}),t.find(".pagination a").click(function(){window.aMember.pIndex=$(this).attr("page"),window.aMember.query()})})}}},e});
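// Usage sketch (hedged; parameter roles are inferred from the code above: a list
// of user ids, a selection callback, and an options object):
//
//     require(['biz'], function (biz) {
//         biz.user.browser(['1', '2'], function (uids) { console.log(uids); }, { mode: 'visible' });
//     });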
|
test_distributions_random.py
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import numpy.testing as npt
import scipy.stats as st
from scipy.special import expit
from scipy import linalg
import numpy.random as nr
import theano
import pymc3 as pm
from pymc3.distributions.dist_math import clipped_beta_rvs
from pymc3.distributions.distribution import (draw_values,
_DrawValuesContext,
_DrawValuesContextBlocker)
from .helpers import SeededTest
from .test_distributions import (
build_model, Domain, product, R, Rplus, Rplusbig, Runif, Rplusdunif,
Unit, Nat, NatSmall, I, Simplex, Vector, PdMatrix,
PdMatrixChol, PdMatrixCholUpper, RealMatrix, RandomPdMatrix
)
def pymc3_random(dist, paramdomains, ref_rand, valuedomain=Domain([0]),
size=10000, alpha=0.05, fails=10, extra_args=None,
model_args=None):
if model_args is None:
model_args = {}
model = build_model(dist, valuedomain, paramdomains, extra_args)
domains = paramdomains.copy()
for pt in product(domains, n_samples=100):
pt = pm.Point(pt, model=model)
pt.update(model_args)
p = alpha
# Allow KS test to fail (i.e., the samples be different)
# a certain number of times. Crude, but necessary.
f = fails
while p <= alpha and f > 0:
s0 = model.named_vars['value'].random(size=size, point=pt)
s1 = ref_rand(size=size, **pt)
_, p = st.ks_2samp(np.atleast_1d(s0).flatten(),
np.atleast_1d(s1).flatten())
f -= 1
assert p > alpha, str(pt)
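# Example usage sketch (mirrors test_normal in TestScalarParameterSamples below;
# pm, st, R, and Rplus are all imported above):
#
#     def ref_rand(size, mu, sigma):
#         return st.norm.rvs(size=size, loc=mu, scale=sigma)
#     pymc3_random(pm.Normal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)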
def pymc3_random_discrete(dist, paramdomains,
valuedomain=Domain([0]), ref_rand=None,
size=100000, alpha=0.05, fails=20):
model = build_model(dist, valuedomain, paramdomains)
domains = paramdomains.copy()
for pt in product(domains, n_samples=100):
pt = pm.Point(pt, model=model)
p = alpha
# Allow Chisq test to fail (i.e., the samples be different)
# a certain number of times.
f = fails
while p <= alpha and f > 0:
o = model.named_vars['value'].random(size=size, point=pt)
e = ref_rand(size=size, **pt)
o = np.atleast_1d(o).flatten()
e = np.atleast_1d(e).flatten()
observed = dict(zip(*np.unique(o, return_counts=True)))
expected = dict(zip(*np.unique(e, return_counts=True)))
            for key in expected.keys():
                expected[key] = (observed.get(key, 0), expected[key])
k = np.array([v for v in expected.values()])
if np.all(k[:, 0] == k[:, 1]):
p = 1.
else:
_, p = st.chisquare(k[:, 0], k[:, 1])
f -= 1
assert p > alpha, str(pt)
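# Example usage sketch (mirrors test_binomial below):
#
#     pymc3_random_discrete(pm.Binomial, {'n': Nat, 'p': Unit}, ref_rand=st.binom.rvs)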
class TestDrawValues(SeededTest):
def test_draw_scalar_parameters(self):
with pm.Model():
y = pm.Normal('y1', mu=0., sigma=1.)
mu, tau = draw_values([y.distribution.mu, y.distribution.tau])
npt.assert_almost_equal(mu, 0)
npt.assert_almost_equal(tau, 1)
def test_draw_dependencies(self):
with pm.Model():
x = pm.Normal('x', mu=0., sigma=1.)
exp_x = pm.Deterministic('exp_x', pm.math.exp(x))
x, exp_x = draw_values([x, exp_x])
npt.assert_almost_equal(np.exp(x), exp_x)
def test_draw_order(self):
with pm.Model():
x = pm.Normal('x', mu=0., sigma=1.)
exp_x = pm.Deterministic('exp_x', pm.math.exp(x))
            # Need to draw x before drawing exp_x
exp_x, x = draw_values([exp_x, x])
npt.assert_almost_equal(np.exp(x), exp_x)
def test_draw_point_replacement(self):
with pm.Model():
mu = pm.Normal('mu', mu=0., tau=1e-3)
sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)
y = pm.Normal('y', mu=mu, sigma=sigma)
mu2, tau2 = draw_values([y.distribution.mu, y.distribution.tau],
point={'mu': 5., 'sigma': 2.})
npt.assert_almost_equal(mu2, 5)
npt.assert_almost_equal(tau2, 1 / 2.**2)
def test_random_sample_returns_nd_array(self):
with pm.Model():
mu = pm.Normal('mu', mu=0., tau=1e-3)
sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)
y = pm.Normal('y', mu=mu, sigma=sigma)
mu, tau = draw_values([y.distribution.mu, y.distribution.tau])
assert isinstance(mu, np.ndarray)
assert isinstance(tau, np.ndarray)
class TestDrawValuesContext:
def test_normal_context(self):
with _DrawValuesContext() as context0:
assert context0.parent is None
context0.drawn_vars['root_test'] = 1
with _DrawValuesContext() as context1:
assert id(context1.drawn_vars) == id(context0.drawn_vars)
assert context1.parent == context0
with _DrawValuesContext() as context2:
assert id(context2.drawn_vars) == id(context0.drawn_vars)
assert context2.parent == context1
context2.drawn_vars['leaf_test'] = 2
assert context1.drawn_vars['leaf_test'] == 2
context1.drawn_vars['root_test'] = 3
assert context0.drawn_vars['root_test'] == 3
assert context0.drawn_vars['leaf_test'] == 2
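    # Note: nested _DrawValuesContext instances share a single drawn_vars dict
    # with their parent, so values drawn in an inner context are visible to all
    # enclosing contexts (as the assertions above demonstrate).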
def test_blocking_context(self):
with _DrawValuesContext() as context0:
assert context0.parent is None
context0.drawn_vars['root_test'] = 1
with _DrawValuesContext() as context1:
assert id(context1.drawn_vars) == id(context0.drawn_vars)
assert context1.parent == context0
with _DrawValuesContextBlocker() as blocker:
assert id(blocker.drawn_vars) != id(context0.drawn_vars)
assert blocker.parent is None
blocker.drawn_vars['root_test'] = 2
with _DrawValuesContext() as context2:
assert id(context2.drawn_vars) == id(blocker.drawn_vars)
assert context2.parent == blocker
context2.drawn_vars['root_test'] = 3
context2.drawn_vars['leaf_test'] = 4
assert blocker.drawn_vars['root_test'] == 3
assert 'leaf_test' not in context1.drawn_vars
assert context0.drawn_vars['root_test'] == 1
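# BaseTestCase is nested inside a plain holder class so that pytest does not
# collect the abstract base itself; only the concrete subclasses below
# (TestNormal, TestUniform, ...) are collected and run.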
class BaseTestCases:
class BaseTestCase(SeededTest):
shape = 5
def setup_method(self, *args, **kwargs):
super().setup_method(*args, **kwargs)
self.model = pm.Model()
def get_random_variable(self, shape, with_vector_params=False, name=None):
if with_vector_params:
params = {key: value * np.ones(self.shape, dtype=np.dtype(type(value))) for
key, value in self.params.items()}
else:
params = self.params
if name is None:
name = self.distribution.__name__
with self.model:
if shape is None:
return self.distribution(name, transform=None, **params)
else:
try:
return self.distribution(name, shape=shape, transform=None, **params)
except TypeError:
if np.sum(np.atleast_1d(shape)) == 0:
pytest.skip("Timeseries must have positive shape")
raise
@staticmethod
def sample_random_variable(random_variable, size):
try:
return random_variable.random(size=size)
except AttributeError:
return random_variable.distribution.random(size=size)
@pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)
def test_scalar_parameter_shape(self, size):
rv = self.get_random_variable(None)
if size is None:
                expected = (1,)
else:
expected = np.atleast_1d(size).tolist()
actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape
assert tuple(expected) == actual
@pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)
def test_scalar_shape(self, size):
shape = 10
rv = self.get_random_variable(shape)
if size is None:
expected = []
else:
expected = np.atleast_1d(size).tolist()
expected.append(shape)
actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape
assert tuple(expected) == actual
@pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)
def test_parameters_1d_shape(self, size):
rv = self.get_random_variable(self.shape, with_vector_params=True)
if size is None:
expected = []
else:
expected = np.atleast_1d(size).tolist()
expected.append(self.shape)
actual = self.sample_random_variable(rv, size).shape
assert tuple(expected) == actual
@pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)
def test_broadcast_shape(self, size):
broadcast_shape = (2 * self.shape, self.shape)
rv = self.get_random_variable(broadcast_shape, with_vector_params=True)
if size is None:
expected = []
else:
expected = np.atleast_1d(size).tolist()
expected.extend(broadcast_shape)
actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape
assert tuple(expected) == actual
@pytest.mark.parametrize('shape', [(), (1,), (1, 1), (1, 2), (10, 10, 1), (10, 10, 2)], ids=str)
def test_different_shapes_and_sample_sizes(self, shape):
prefix = self.distribution.__name__
rv = self.get_random_variable(shape, name=f'{prefix}_{shape}')
for size in (None, 1, 5, (4, 5)):
if size is None:
s = []
else:
try:
s = list(size)
except TypeError:
s = [size]
if s == [1]:
s = []
if shape not in ((), (1,)):
s.extend(shape)
e = tuple(s)
a = self.sample_random_variable(rv, size).shape
assert e == a
class TestGaussianRandomWalk(BaseTestCases.BaseTestCase):
distribution = pm.GaussianRandomWalk
params = {'mu': 1., 'sigma': 1.}
@pytest.mark.xfail(reason="Supporting this makes a nasty API")
def test_broadcast_shape(self):
super().test_broadcast_shape()
class TestNormal(BaseTestCases.BaseTestCase):
distribution = pm.Normal
params = {'mu': 0., 'tau': 1.}
class TestTruncatedNormal(BaseTestCases.BaseTestCase):
distribution = pm.TruncatedNormal
params = {'mu': 0., 'tau': 1., 'lower': -0.5, 'upper': 0.5}
class TestTruncatedNormalLower(BaseTestCases.BaseTestCase):
distribution = pm.TruncatedNormal
params = {'mu': 0., 'tau': 1., 'lower': -0.5}
class TestTruncatedNormalUpper(BaseTestCases.BaseTestCase):
distribution = pm.TruncatedNormal
params = {'mu': 0., 'tau': 1., 'upper': 0.5}
class TestSkewNormal(BaseTestCases.BaseTestCase):
distribution = pm.SkewNormal
params = {'mu': 0., 'sigma': 1., 'alpha': 5.}
class TestHalfNormal(BaseTestCases.BaseTestCase):
distribution = pm.HalfNormal
params = {'tau': 1.}
class TestUniform(BaseTestCases.BaseTestCase):
distribution = pm.Uniform
params = {'lower': 0., 'upper': 1.}
class TestTriangular(BaseTestCases.BaseTestCase):
distribution = pm.Triangular
params = {'c': 0.5, 'lower': 0., 'upper': 1.}
class TestWald(BaseTestCases.BaseTestCase):
distribution = pm.Wald
params = {'mu': 1., 'lam': 1., 'alpha': 0.}
class TestBeta(BaseTestCases.BaseTestCase):
distribution = pm.Beta
params = {'alpha': 1., 'beta': 1.}
class TestKumaraswamy(BaseTestCases.BaseTestCase):
distribution = pm.Kumaraswamy
params = {'a': 1., 'b': 1.}
class TestExponential(BaseTestCases.BaseTestCase):
distribution = pm.Exponential
params = {'lam': 1.}
class TestLaplace(BaseTestCases.BaseTestCase):
distribution = pm.Laplace
params = {'mu': 1., 'b': 1.}
class TestLognormal(BaseTestCases.BaseTestCase):
distribution = pm.Lognormal
params = {'mu': 1., 'tau': 1.}
class TestStudentT(BaseTestCases.BaseTestCase):
distribution = pm.StudentT
params = {'nu': 5., 'mu': 0., 'lam': 1.}
class TestPareto(BaseTestCases.BaseTestCase):
distribution = pm.Pareto
params = {'alpha': 0.5, 'm': 1.}
class TestCauchy(BaseTestCases.BaseTestCase):
distribution = pm.Cauchy
params = {'alpha': 1., 'beta': 1.}
class TestHalfCauchy(BaseTestCases.BaseTestCase):
distribution = pm.HalfCauchy
params = {'beta': 1.}
class TestGamma(BaseTestCases.BaseTestCase):
distribution = pm.Gamma
params = {'alpha': 1., 'beta': 1.}
class TestInverseGamma(BaseTestCases.BaseTestCase):
distribution = pm.InverseGamma
params = {'alpha': 0.5, 'beta': 0.5}
class TestChiSquared(BaseTestCases.BaseTestCase):
distribution = pm.ChiSquared
params = {'nu': 2.}
class TestWeibull(BaseTestCases.BaseTestCase):
distribution = pm.Weibull
params = {'alpha': 1., 'beta': 1.}
class TestExGaussian(BaseTestCases.BaseTestCase):
distribution = pm.ExGaussian
params = {'mu': 0., 'sigma': 1., 'nu': 1.}
class TestVonMises(BaseTestCases.BaseTestCase):
distribution = pm.VonMises
params = {'mu': 0., 'kappa': 1.}
class TestGumbel(BaseTestCases.BaseTestCase):
distribution = pm.Gumbel
params = {'mu': 0., 'beta': 1.}
class TestLogistic(BaseTestCases.BaseTestCase):
distribution = pm.Logistic
params = {'mu': 0., 's': 1.}
class TestLogitNormal(BaseTestCases.BaseTestCase):
distribution = pm.LogitNormal
params = {'mu': 0., 'sigma': 1.}
class TestBinomial(BaseTestCases.BaseTestCase):
distribution = pm.Binomial
params = {'n': 5, 'p': 0.5}
class TestBetaBinomial(BaseTestCases.BaseTestCase):
distribution = pm.BetaBinomial
params = {'n': 5, 'alpha': 1., 'beta': 1.}
class TestBernoulli(BaseTestCases.BaseTestCase):
distribution = pm.Bernoulli
params = {'p': 0.5}
class TestDiscreteWeibull(BaseTestCases.BaseTestCase):
distribution = pm.DiscreteWeibull
params = {'q': 0.25, 'beta': 2.}
class TestPoisson(BaseTestCases.BaseTestCase):
distribution = pm.Poisson
params = {'mu': 1.}
class TestNegativeBinomial(BaseTestCases.BaseTestCase):
distribution = pm.NegativeBinomial
params = {'mu': 1., 'alpha': 1.}
class TestConstant(BaseTestCases.BaseTestCase):
distribution = pm.Constant
params = {'c': 3}
class TestZeroInflatedPoisson(BaseTestCases.BaseTestCase):
distribution = pm.ZeroInflatedPoisson
params = {'theta': 1., 'psi': 0.3}
class TestZeroInflatedNegativeBinomial(BaseTestCases.BaseTestCase):
distribution = pm.ZeroInflatedNegativeBinomial
params = {'mu': 1., 'alpha': 1., 'psi': 0.3}
class TestZeroInflatedBinomial(BaseTestCases.BaseTestCase):
distribution = pm.ZeroInflatedBinomial
params = {'n': 10, 'p': 0.6, 'psi': 0.3}
class TestDiscreteUniform(BaseTestCases.BaseTestCase):
distribution = pm.DiscreteUniform
params = {'lower': 0., 'upper': 10.}
class TestGeometric(BaseTestCases.BaseTestCase):
distribution = pm.Geometric
params = {'p': 0.5}
class TestMoyal(BaseTestCases.BaseTestCase):
distribution = pm.Moyal
params = {'mu': 0., 'sigma': 1.}
class TestCategorical(BaseTestCases.BaseTestCase):
distribution = pm.Categorical
params = {'p': np.ones(BaseTestCases.BaseTestCase.shape)}
def get_random_variable(self, shape, with_vector_params=False, **kwargs): # don't transform categories
return super().get_random_variable(shape, with_vector_params=False, **kwargs)
def test_probability_vector_shape(self):
"""Check that if a 2d array of probabilities are passed to categorical correct shape is returned"""
p = np.ones((10, 5))
assert pm.Categorical.dist(p=p).random().shape == (10,)
assert pm.Categorical.dist(p=p).random(size=4).shape == (4, 10)
p = np.ones((3, 7, 5))
assert pm.Categorical.dist(p=p).random().shape == (3, 7)
assert pm.Categorical.dist(p=p).random(size=4).shape == (4, 3, 7)
class TestScalarParameterSamples(SeededTest):
def test_bounded(self):
# A bit crude...
BoundedNormal = pm.Bound(pm.Normal, upper=0)
def ref_rand(size, tau):
return -st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)
pymc3_random(BoundedNormal, {'tau': Rplus}, ref_rand=ref_rand)
def test_uniform(self):
def ref_rand(size, lower, upper):
return st.uniform.rvs(size=size, loc=lower, scale=upper - lower)
pymc3_random(pm.Uniform, {'lower': -Rplus, 'upper': Rplus}, ref_rand=ref_rand)
def test_normal(self):
def ref_rand(size, mu, sigma):
return st.norm.rvs(size=size, loc=mu, scale=sigma)
pymc3_random(pm.Normal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)
def test_truncated_normal(self):
def ref_rand(size, mu, sigma, lower, upper):
return st.truncnorm.rvs((lower - mu) / sigma, (upper - mu) / sigma, size=size, loc=mu, scale=sigma)
pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'lower': -Rplusbig, 'upper': Rplusbig},
ref_rand=ref_rand)
def test_truncated_normal_lower(self):
def ref_rand(size, mu, sigma, lower):
return st.truncnorm.rvs((lower - mu) / sigma, np.inf, size=size, loc=mu, scale=sigma)
pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'lower': -Rplusbig},
ref_rand=ref_rand)
def test_truncated_normal_upper(self):
def ref_rand(size, mu, sigma, upper):
return st.truncnorm.rvs(-np.inf, (upper - mu) / sigma, size=size, loc=mu, scale=sigma)
pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'upper': Rplusbig},
ref_rand=ref_rand)
def test_skew_normal(self):
def ref_rand(size, alpha, mu, sigma):
return st.skewnorm.rvs(size=size, a=alpha, loc=mu, scale=sigma)
pymc3_random(pm.SkewNormal, {'mu': R, 'sigma': Rplus, 'alpha': R}, ref_rand=ref_rand)
def test_half_normal(self):
def ref_rand(size, tau):
return st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)
pymc3_random(pm.HalfNormal, {'tau': Rplus}, ref_rand=ref_rand)
def test_wald(self):
# Cannot do anything too exciting as scipy wald is a
# location-scale model of the *standard* wald with mu=1 and lam=1
def ref_rand(size, mu, lam, alpha):
return st.wald.rvs(size=size, loc=alpha)
pymc3_random(pm.Wald,
{'mu': Domain([1., 1., 1.]), 'lam': Domain(
[1., 1., 1.]), 'alpha': Rplus},
ref_rand=ref_rand)
def test_beta(self):
def ref_rand(size, alpha, beta):
return clipped_beta_rvs(a=alpha, b=beta, size=size)
pymc3_random(pm.Beta, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)
def test_exponential(self):
def ref_rand(size, lam):
return nr.exponential(scale=1. / lam, size=size)
pymc3_random(pm.Exponential, {'lam': Rplus}, ref_rand=ref_rand)
def test_laplace(self):
def ref_rand(size, mu, b):
return st.laplace.rvs(mu, b, size=size)
pymc3_random(pm.Laplace, {'mu': R, 'b': Rplus}, ref_rand=ref_rand)
def test_lognormal(self):
def ref_rand(size, mu, tau):
return np.exp(mu + (tau ** -0.5) * st.norm.rvs(loc=0., scale=1., size=size))
pymc3_random(pm.Lognormal, {'mu': R, 'tau': Rplusbig}, ref_rand=ref_rand)
def test_student_t(self):
def ref_rand(size, nu, mu, lam):
return st.t.rvs(nu, mu, lam**-.5, size=size)
pymc3_random(pm.StudentT, {'nu': Rplus, 'mu': R, 'lam': Rplus}, ref_rand=ref_rand)
def test_cauchy(self):
def ref_rand(size, alpha, beta):
return st.cauchy.rvs(alpha, beta, size=size)
pymc3_random(pm.Cauchy, {'alpha': R, 'beta': Rplusbig}, ref_rand=ref_rand)
def test_half_cauchy(self):
def ref_rand(size, beta):
return st.halfcauchy.rvs(scale=beta, size=size)
pymc3_random(pm.HalfCauchy, {'beta': Rplusbig}, ref_rand=ref_rand)
def test_gamma_alpha_beta(self):
def ref_rand(size, alpha, beta):
return st.gamma.rvs(alpha, scale=1. / beta, size=size)
pymc3_random(pm.Gamma, {'alpha': Rplusbig, 'beta': Rplusbig}, ref_rand=ref_rand)
def test_gamma_mu_sigma(self):
def ref_rand(size, mu, sigma):
return st.gamma.rvs(mu**2 / sigma**2, scale=sigma ** 2 / mu, size=size)
pymc3_random(pm.Gamma, {'mu': Rplusbig, 'sigma': Rplusbig}, ref_rand=ref_rand)
def test_inverse_gamma(self):
def ref_rand(size, alpha, beta):
return st.invgamma.rvs(a=alpha, scale=beta, size=size)
pymc3_random(pm.InverseGamma, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)
def test_pareto(self):
def ref_rand(size, alpha, m):
return st.pareto.rvs(alpha, scale=m, size=size)
pymc3_random(pm.Pareto, {'alpha': Rplusbig, 'm': Rplusbig}, ref_rand=ref_rand)
def test_ex_gaussian(self):
def ref_rand(size, mu, sigma, nu):
return nr.normal(mu, sigma, size=size) + nr.exponential(scale=nu, size=size)
pymc3_random(pm.ExGaussian, {'mu': R, 'sigma': Rplus, 'nu': Rplus}, ref_rand=ref_rand)
def test_vonmises(self):
def ref_rand(size, mu, kappa):
return st.vonmises.rvs(size=size, loc=mu, kappa=kappa)
pymc3_random(pm.VonMises, {'mu': R, 'kappa': Rplus}, ref_rand=ref_rand)
def test_triangular(self):
def ref_rand(size, lower, upper, c):
scale = upper - lower
c_ = (c - lower) / scale
return st.triang.rvs(size=size, loc=lower, scale=scale, c=c_)
pymc3_random(pm.Triangular, {'lower': Runif, 'upper': Runif + 3, 'c': Runif + 1}, ref_rand=ref_rand)
def test_flat(self):
with pm.Model():
f = pm.Flat('f')
with pytest.raises(ValueError):
f.random(1)
def test_half_flat(self):
with pm.Model():
f = pm.HalfFlat('f')
with pytest.raises(ValueError):
f.random(1)
def test_binomial(self):
pymc3_random_discrete(pm.Binomial, {'n': Nat, 'p': Unit}, ref_rand=st.binom.rvs)
def test_beta_binomial(self):
pymc3_random_discrete(pm.BetaBinomial, {'n': Nat, 'alpha': Rplus, 'beta': Rplus},
ref_rand=self._beta_bin)
def _beta_bin(self, n, alpha, beta, size=None):
return st.binom.rvs(n, st.beta.rvs(a=alpha, b=beta, size=size))
def test_bernoulli(self):
pymc3_random_discrete(pm.Bernoulli, {'p': Unit},
ref_rand=lambda size, p=None: st.bernoulli.rvs(p, size=size))
def test_poisson(self):
pymc3_random_discrete(pm.Poisson, {'mu': Rplusbig}, size=500, ref_rand=st.poisson.rvs)
def test_negative_binomial(self):
def ref_rand(size, alpha, mu):
return st.nbinom.rvs(alpha, alpha / (mu + alpha), size=size)
pymc3_random_discrete(pm.NegativeBinomial, {'mu': Rplusbig, 'alpha': Rplusbig},
size=100, fails=50, ref_rand=ref_rand)
def test_geometric(self):
pymc3_random_discrete(pm.Geometric, {'p': Unit}, size=500, fails=50, ref_rand=nr.geometric)
def test_discrete_uniform(self):
def ref_rand(size, lower, upper):
return st.randint.rvs(lower, upper + 1, size=size)
pymc3_random_discrete(pm.DiscreteUniform, {'lower': -NatSmall, 'upper': NatSmall},
ref_rand=ref_rand)
def test_discrete_weibull(self):
def ref_rand(size, q, beta):
u = np.random.uniform(size=size)
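            # Invert the discrete Weibull CDF F(x) = 1 - q**((x + 1)**beta):
            # solving u = F(x) for x gives the ceil expression below.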
return np.ceil(np.power(np.log(1 - u) / np.log(q), 1. / beta)) - 1
pymc3_random_discrete(pm.DiscreteWeibull, {'q': Unit, 'beta': Rplusdunif},
ref_rand=ref_rand)
@pytest.mark.parametrize('s', [2, 3, 4])
def test_categorical_random(self, s):
def ref_rand(size, p):
return nr.choice(np.arange(p.shape[0]), p=p, size=size)
pymc3_random_discrete(pm.Categorical, {'p': Simplex(s)}, ref_rand=ref_rand)
def test_constant_dist(self):
def ref_rand(size, c):
return c * np.ones(size, dtype=int)
pymc3_random_discrete(pm.Constant, {'c': I}, ref_rand=ref_rand)
def test_mv_normal(self):
def ref_rand(size, mu, cov):
return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)
def ref_rand_tau(size, mu, tau):
return ref_rand(size, mu, linalg.inv(tau))
def ref_rand_chol(size, mu, chol):
return ref_rand(size, mu, np.dot(chol, chol.T))
def ref_rand_uchol(size, mu, chol):
return ref_rand(size, mu, np.dot(chol.T, chol))
for n in [2, 3]:
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'cov': PdMatrix(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'tau': PdMatrix(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_tau)
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'chol': PdMatrixChol(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_chol)
pymc3_random(
pm.MvNormal,
{'mu': Vector(R, n), 'chol': PdMatrixCholUpper(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_uchol,
extra_args={'lower': False}
)
def test_matrix_normal(self):
def ref_rand(size, mu, rowcov, colcov):
return st.matrix_normal.rvs(mean=mu, rowcov=rowcov, colcov=colcov, size=size)
# def ref_rand_tau(size, mu, tau):
# return ref_rand(size, mu, linalg.inv(tau))
def ref_rand_chol(size, mu, rowchol, colchol):
return ref_rand(size, mu, rowcov=np.dot(rowchol, rowchol.T),
colcov=np.dot(colchol, colchol.T))
def ref_rand_uchol(size, mu, rowchol, colchol):
return ref_rand(size, mu, rowcov=np.dot(rowchol.T, rowchol),
colcov=np.dot(colchol.T, colchol))
for n in [2, 3]:
pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowcov': PdMatrix(n), 'colcov': PdMatrix(n)},
size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand)
# pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'tau': PdMatrix(n)},
# size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_tau)
pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowchol': PdMatrixChol(n), 'colchol': PdMatrixChol(n)},
size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_chol)
# pymc3_random(
# pm.MvNormal,
# {'mu': RealMatrix(n, n), 'rowchol': PdMatrixCholUpper(n), 'colchol': PdMatrixCholUpper(n)},
# size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_uchol,
# extra_args={'lower': False}
# )
def test_kronecker_normal(self):
def ref_rand(size, mu, covs, sigma):
cov = pm.math.kronecker(covs[0], covs[1]).eval()
cov += sigma**2 * np.identity(cov.shape[0])
return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)
def ref_rand_chol(size, mu, chols, sigma):
covs = [np.dot(chol, chol.T) for chol in chols]
return ref_rand(size, mu, covs, sigma)
def ref_rand_evd(size, mu, evds, sigma):
covs = []
for eigs, Q in evds:
covs.append(np.dot(Q, np.dot(np.diag(eigs), Q.T)))
return ref_rand(size, mu, covs, sigma)
sizes = [2, 3]
sigmas = [0, 1]
for n, sigma in zip(sizes, sigmas):
N = n**2
covs = [RandomPdMatrix(n), RandomPdMatrix(n)]
chols = list(map(np.linalg.cholesky, covs))
evds = list(map(np.linalg.eigh, covs))
dom = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)
mu = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)
std_args = {'mu': mu}
cov_args = {'covs': covs}
chol_args = {'chols': chols}
evd_args = {'evds': evds}
if sigma is not None and sigma != 0:
std_args['sigma'] = Domain([sigma], edges=(None, None))
else:
for args in [cov_args, chol_args, evd_args]:
args['sigma'] = sigma
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand, extra_args=cov_args, model_args=cov_args)
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand_chol, extra_args=chol_args,
model_args=chol_args)
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand_evd, extra_args=evd_args,
model_args=evd_args)
def test_mv_t(self):
def ref_rand(size, nu, Sigma, mu):
normal = st.multivariate_normal.rvs(cov=Sigma, size=size).T
chi2 = st.chi2.rvs(df=nu, size=size)
return mu + np.sqrt(nu) * (normal / chi2).T
for n in [2, 3]:
pymc3_random(pm.MvStudentT,
{'nu': Domain([5, 10, 25, 50]), 'Sigma': PdMatrix(
n), 'mu': Vector(R, n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)
def test_dirichlet(self):
def ref_rand(size, a):
return st.dirichlet.rvs(a, size=size)
for n in [2, 3]:
pymc3_random(pm.Dirichlet, {'a': Vector(Rplus, n)},
valuedomain=Simplex(n), size=100, ref_rand=ref_rand)
def test_multinomial(self):
def ref_rand(size, p, n):
return nr.multinomial(pvals=p, n=n, size=size)
for n in [2, 3]:
pymc3_random_discrete(pm.Multinomial, {'p': Simplex(n), 'n': Nat},
valuedomain=Vector(Nat, n), size=100, ref_rand=ref_rand)
def test_gumbel(self):
def ref_rand(size, mu, beta):
return st.gumbel_r.rvs(loc=mu, scale=beta, size=size)
pymc3_random(pm.Gumbel, {'mu': R, 'beta': Rplus}, ref_rand=ref_rand)
def test_logistic(self):
def ref_rand(size, mu, s):
return st.logistic.rvs(loc=mu, scale=s, size=size)
pymc3_random(pm.Logistic, {'mu': R, 's': Rplus}, ref_rand=ref_rand)
def test_logitnormal(self):
def ref_rand(size, mu, sigma):
return expit(st.norm.rvs(loc=mu, scale=sigma, size=size))
pymc3_random(pm.LogitNormal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)
def test_moyal(self):
def ref_rand(size, mu, sigma):
return st.moyal.rvs(loc=mu, scale=sigma, size=size)
pymc3_random(pm.Moyal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_interpolated(self):
for mu in R.vals:
for sigma in Rplus.vals:
#pylint: disable=cell-var-from-loop
def ref_rand(size):
return st.norm.rvs(loc=mu, scale=sigma, size=size)
class TestedInterpolated (pm.Interpolated):
def __init__(self, **kwargs):
x_points = np.linspace(mu - 5 * sigma, mu + 5 * sigma, 100)
pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma)
super().__init__(
x_points=x_points,
pdf_points=pdf_points,
**kwargs
)
pymc3_random(TestedInterpolated, {}, ref_rand=ref_rand)
@pytest.mark.skip('Wishart random sampling not implemented.\n'
'See https://github.com/pymc-devs/pymc3/issues/538')
def test_wishart(self):
        # Wishart currently not recommended for use:
# https://github.com/pymc-devs/pymc3/issues/538
# for n in [2, 3]:
        # pymc3_random_discrete(Wishart,
# {'n': Domain([2, 3, 4, 2000]) , 'V': PdMatrix(n) },
# valuedomain=PdMatrix(n),
# ref_rand=lambda n=None, V=None, size=None: \
# st.wishart(V, df=n, size=size))
pass
def test_lkj(self):
for n in [2, 10, 50]:
#pylint: disable=cell-var-from-loop
shape = n*(n-1)//2
def ref_rand(size, eta):
beta = eta - 1 + n/2
return (st.beta.rvs(size=(size, shape), a=beta, b=beta)-.5)*2
class TestedLKJCorr (pm.LKJCorr):
def __init__(self, **kwargs):
kwargs.pop('shape', None)
super().__init__(n=n, **kwargs)
pymc3_random(TestedLKJCorr,
{'eta': Domain([1., 10., 100.])},
size=10000//n,
ref_rand=ref_rand)
def test_normalmixture(self):
def ref_rand(size, w, mu, sigma):
component = np.random.choice(w.size, size=size, p=w)
return np.random.normal(mu[component], sigma[component], size=size)
pymc3_random(pm.NormalMixture, {'w': Simplex(2),
'mu': Domain([[.05, 2.5], [-5., 1.]], edges=(None, None)),
'sigma': Domain([[1, 1], [1.5, 2.]], edges=(None, None))},
extra_args={'comp_shape': 2},
size=1000,
ref_rand=ref_rand)
pymc3_random(pm.NormalMixture, {'w': Simplex(3),
'mu': Domain([[-5., 1., 2.5]], edges=(None, None)),
'sigma': Domain([[1.5, 2., 3.]], edges=(None, None))},
extra_args={'comp_shape': 3},
size=1000,
ref_rand=ref_rand)
def test_mixture_random_shape():
# test the shape broadcasting in mixture random
y = np.concatenate([nr.poisson(5, size=10),
nr.poisson(9, size=10)])
with pm.Model() as m:
comp0 = pm.Poisson.dist(mu=np.ones(2))
w0 = pm.Dirichlet('w0', a=np.ones(2), shape=(2,))
like0 = pm.Mixture('like0',
w=w0,
comp_dists=comp0,
observed=y)
comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
shape=(20, 2))
w1 = pm.Dirichlet('w1', a=np.ones(2), shape=(2,))
like1 = pm.Mixture('like1',
w=w1,
comp_dists=comp1,
observed=y)
comp2 = pm.Poisson.dist(mu=np.ones(2))
w2 = pm.Dirichlet('w2',
a=np.ones(2),
shape=(20, 2))
like2 = pm.Mixture('like2',
w=w2,
comp_dists=comp2,
observed=y)
comp3 = pm.Poisson.dist(mu=np.ones(2),
shape=(20, 2))
w3 = pm.Dirichlet('w3',
a=np.ones(2),
shape=(20, 2))
like3 = pm.Mixture('like3',
w=w3,
comp_dists=comp3,
observed=y)
rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],
point=m.test_point,
size=100)
assert rand0.shape == (100, 20)
assert rand1.shape == (100, 20)
assert rand2.shape == (100, 20)
assert rand3.shape == (100, 20)
with m:
ppc = pm.sample_posterior_predictive([m.test_point], samples=200)
assert ppc['like0'].shape == (200, 20)
assert ppc['like1'].shape == (200, 20)
assert ppc['like2'].shape == (200, 20)
assert ppc['like3'].shape == (200, 20)
@pytest.mark.xfail
def test_mixture_random_shape_fast():
# test the shape broadcasting in mixture random
y = np.concatenate([nr.poisson(5, size=10),
nr.poisson(9, size=10)])
with pm.Model() as m:
comp0 = pm.Poisson.dist(mu=np.ones(2))
w0 = pm.Dirichlet('w0', a=np.ones(2), shape=(2,))
like0 = pm.Mixture('like0',
w=w0,
comp_dists=comp0,
observed=y)
comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
shape=(20, 2))
w1 = pm.Dirichlet('w1', a=np.ones(2), shape=(2,))
like1 = pm.Mixture('like1',
w=w1,
comp_dists=comp1,
observed=y)
comp2 = pm.Poisson.dist(mu=np.ones(2))
w2 = pm.Dirichlet('w2',
a=np.ones(2),
shape=(20, 2))
like2 = pm.Mixture('like2',
w=w2,
comp_dists=comp2,
observed=y)
comp3 = pm.Poisson.dist(mu=np.ones(2),
shape=(20, 2))
w3 = pm.Dirichlet('w3',
a=np.ones(2),
shape=(20, 2))
like3 = pm.Mixture('like3',
w=w3,
comp_dists=comp3,
observed=y)
rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],
point=m.test_point,
size=100)
assert rand0.shape == (100, 20)
assert rand1.shape == (100, 20)
assert rand2.shape == (100, 20)
assert rand3.shape == (100, 20)
# I *think* that the mixture means that this is not going to work,
# but I could be wrong. [2019/08/22:rpg]
with m:
ppc = pm.fast_sample_posterior_predictive([m.test_point], samples=200)
assert ppc['like0'].shape == (200, 20)
assert ppc['like1'].shape == (200, 20)
assert ppc['like2'].shape == (200, 20)
assert ppc['like3'].shape == (200, 20)
class TestDensityDist():
@pytest.mark.parametrize("shape", [(), (3,), (3, 2)], ids=str)
def test_density_dist_with_random_sampleable(self, shape):
with pm.Model() as model:
mu = pm.Normal('mu', 0, 1)
normal_dist = pm.Normal.dist(mu, 1, shape=shape)
obs = pm.DensityDist(
'density_dist',
normal_dist.logp,
observed=np.random.randn(100, *shape),
shape=shape,
random=normal_dist.random)
trace = pm.sample(100)
samples = 500
size = 100
ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model, size=size)
assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape
# ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=size)
# assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape
@pytest.mark.parametrize("shape", [(), (3,), (3, 2)], ids=str)
def test_density_dist_with_random_sampleable_failure(self, shape):
with pm.Model() as model:
mu = pm.Normal('mu', 0, 1)
normal_dist = pm.Normal.dist(mu, 1, shape=shape)
pm.DensityDist(
'density_dist',
normal_dist.logp,
observed=np.random.randn(100, *shape),
shape=shape,
random=normal_dist.random,
wrap_random_with_dist_shape=False
)
trace = pm.sample(100)
samples = 500
with pytest.raises(RuntimeError):
pm.sample_posterior_predictive(trace, samples=samples, model=model, size=100)
with pytest.raises((TypeError, RuntimeError)):
pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=100)
@pytest.mark.parametrize("shape", [(), (3,), (3, 2)], ids=str)
def test_density_dist_with_random_sampleable_hidden_error(self, shape):
with pm.Model() as model:
mu = pm.Normal('mu', 0, 1)
normal_dist = pm.Normal.dist(mu, 1, shape=shape)
obs = pm.DensityDist(
'density_dist',
normal_dist.logp,
observed=np.random.randn(100, *shape),
shape=shape,
random=normal_dist.random,
wrap_random_with_dist_shape=False,
check_shape_in_random=False
)
trace = pm.sample(100)
samples = 500
ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model)
assert len(ppc['density_dist']) == samples
assert ((samples,) + obs.distribution.shape) != ppc['density_dist'].shape
ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model)
assert len(ppc['density_dist']) == samples
assert ((samples,) + obs.distribution.shape) != ppc['density_dist'].shape
def test_density_dist_with_random_sampleable_handcrafted_success(self):
with pm.Model() as model:
mu = pm.Normal('mu', 0, 1)
normal_dist = pm.Normal.dist(mu, 1)
rvs = pm.Normal.dist(mu, 1, shape=100).random
obs = pm.DensityDist(
'density_dist',
normal_dist.logp,
observed=np.random.randn(100),
random=rvs,
wrap_random_with_dist_shape=False
)
trace = pm.sample(100)
samples = 500
size = 100
ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model, size=size)
assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape
@pytest.mark.xfail
def test_density_dist_with_random_sampleable_handcrafted_success_fast(self):
with pm.Model() as model:
mu = pm.Normal('mu', 0, 1)
normal_dist = pm.Normal.dist(mu, 1)
rvs = pm.Normal.dist(mu, 1, shape=100).random
obs = pm.DensityDist(
'density_dist',
normal_dist.logp,
observed=np.random.randn(100),
random=rvs,
wrap_random_with_dist_shape=False
)
trace = pm.sample(100)
samples = 500
size = 100
ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=size)
assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape
def test_density_dist_without_random_not_sampleable(self):
with pm.Model() as model:
mu = pm.Normal('mu', 0, 1)
normal_dist = pm.Normal.dist(mu, 1)
pm.DensityDist('density_dist', normal_dist.logp, observed=np.random.randn(100))
trace = pm.sample(100)
samples = 500
with pytest.raises(ValueError):
pm.sample_posterior_predictive(trace, samples=samples, model=model, size=100)
with pytest.raises((TypeError, ValueError)):
pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=100)
class TestNestedRandom(SeededTest):
def build_model(self, distribution, shape, nested_rvs_info):
with pm.Model() as model:
nested_rvs = {}
for rv_name, info in nested_rvs_info.items():
try:
value, nested_shape = info
loc = 0.
except ValueError:
value, nested_shape, loc = info
if value is None:
nested_rvs[rv_name] = pm.Uniform(
rv_name,
0 + loc,
1 + loc,
shape=nested_shape,
)
else:
nested_rvs[rv_name] = value * np.ones(nested_shape)
rv = distribution(
"target",
shape=shape,
**nested_rvs,
)
return model, rv, nested_rvs
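    # nested_rvs_info (see build_model above) maps each parameter name to
    # (value, nested_shape) or (value, nested_shape, loc); a value of None
    # creates a Uniform(loc, 1 + loc) RV with the given shape, otherwise a
    # constant array of that value is used.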
def sample_prior(
self,
distribution,
shape,
nested_rvs_info,
prior_samples
):
model, rv, nested_rvs = self.build_model(
distribution,
shape,
nested_rvs_info,
)
with model:
return pm.sample_prior_predictive(prior_samples)
@pytest.mark.parametrize(
["prior_samples", "shape", "mu", "alpha"],
[
[10, (3,), (None, tuple()), (None, (3,))],
[10, (3,), (None, (3,)), (None, tuple())],
[10, (4, 3,), (None, (3,)), (None, (3,))],
[10, (4, 3,), (None, (3,)), (None, (4, 3))],
],
ids=str,
)
def test_NegativeBinomial(
self,
prior_samples,
shape,
mu,
alpha,
):
prior = self.sample_prior(
distribution=pm.NegativeBinomial,
shape=shape,
nested_rvs_info=dict(mu=mu, alpha=alpha),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
@pytest.mark.parametrize(
["prior_samples", "shape", "psi", "mu", "alpha"],
[
[10, (3,), (0.5, tuple()), (None, tuple()), (None, (3,))],
[10, (3,), (0.5, (3,)), (None, tuple()), (None, (3,))],
[10, (3,), (0.5, tuple()), (None, (3,)), (None, tuple())],
[10, (3,), (0.5, (3,)), (None, (3,)), (None, tuple())],
[10, (4, 3,), (0.5, (3,)), (None, (3,)), (None, (3,))],
[10, (4, 3,), (0.5, (3,)), (None, (3,)), (None, (4, 3))],
],
ids=str,
)
def test_ZeroInflatedNegativeBinomial(
self,
prior_samples,
shape,
psi,
mu,
alpha,
):
prior = self.sample_prior(
distribution=pm.ZeroInflatedNegativeBinomial,
shape=shape,
nested_rvs_info=dict(psi=psi, mu=mu, alpha=alpha),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
@pytest.mark.parametrize(
["prior_samples", "shape", "nu", "sigma"],
[
[10, (3,), (None, tuple()), (None, (3,))],
[10, (3,), (None, tuple()), (None, (3,))],
[10, (3,), (None, (3,)), (None, tuple())],
[10, (3,), (None, (3,)), (None, tuple())],
[10, (4, 3,), (None, (3,)), (None, (3,))],
[10, (4, 3,), (None, (3,)), (None, (4, 3))],
],
ids=str,
)
def test_Rice(
self,
prior_samples,
shape,
nu,
sigma,
):
prior = self.sample_prior(
distribution=pm.Rice,
shape=shape,
nested_rvs_info=dict(nu=nu, sigma=sigma),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
@pytest.mark.parametrize(
["prior_samples", "shape", "mu", "sigma", "lower", "upper"],
[
[10, (3,), (None, tuple()), (1., tuple()), (None, tuple(), -1), (None, (3,))],
[10, (3,), (None, tuple()), (1., tuple()), (None, tuple(), -1), (None, (3,))],
[10, (3,), (None, tuple()), (1., tuple()), (None, (3,), -1), (None, tuple())],
[10, (3,), (None, tuple()), (1., tuple()), (None, (3,), -1), (None, tuple())],
[10, (4, 3,), (None, (3,)), (1., tuple()), (None, (3,), -1), (None, (3,))],
[10, (4, 3,), (None, (3,)), (1., tuple()), (None, (3,), -1), (None, (4, 3))],
[10, (3,), (0., tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))],
[10, (3,), (0., tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))],
[10, (3,), (0., tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())],
[10, (3,), (0., tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())],
[10, (4, 3,), (0., tuple()), (None, (3,)), (None, (3,), -1), (None, (3,))],
[10, (4, 3,), (0., tuple()), (None, (3,)), (None, (3,), -1), (None, (4, 3))],
],
ids=str,
)
def test_TruncatedNormal(
self,
prior_samples,
shape,
mu,
sigma,
lower,
upper,
):
prior = self.sample_prior(
distribution=pm.TruncatedNormal,
shape=shape,
nested_rvs_info=dict(mu=mu, sigma=sigma, lower=lower, upper=upper),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
@pytest.mark.parametrize(
["prior_samples", "shape", "c", "lower", "upper"],
[
[10, (3,), (None, tuple()), (-1., (3,)), (2, tuple())],
[10, (3,), (None, tuple()), (-1., tuple()), (None, tuple(), 1)],
[10, (3,), (None, (3,)), (-1., tuple()), (None, tuple(), 1)],
[10, (4, 3,), (None, (3,)), (-1., tuple()), (None, (3,), 1)],
[10, (4, 3,), (None, (3,)), (None, tuple(), -1), (None, (3,), 1)],
],
ids=str,
    )
def test_Triangular(
self,
prior_samples,
shape,
c,
lower,
upper,
):
prior = self.sample_prior(
distribution=pm.Triangular,
shape=shape,
nested_rvs_info=dict(c=c, lower=lower, upper=upper),
prior_samples=prior_samples,
)
assert prior["target"].shape == (prior_samples,) + shape
|
build.rs
|
//! Routinator UI build procedure
//!
//! This file is run when `cargo run` or `cargo build` is issued, by a user
//! in this repo or in a dependent repo (most probably routinator itself).
//!
//! It will create a Rust file that contains all the necessary asset files
//! (HTML, CSS, JS) for the routinator UI. That file will be exposed through
//! an API that lives in lib.rs.
extern crate reqwest;
use flate2::read::GzDecoder;
use reqwest::header;
use std::env;
use std::fs;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use tar::Archive;
// URL path and filename of assets file created by Github Actions.
// See .github/workflows/release.yml in this repo.
const DL_URL_PATH: &str = "https://github.com/NLnetLabs/routinator-ui/releases/download";
const DL_FILE_NAME: &str = "routinator-ui-build.tar.gz";
// Filename to use for saving the code generated by this build.rs
const RS_FILE_NAME: &str = "ui-resources.rs";
// Path of the files built locally by npm (in the parent directory of this build.rs).
// - rebuild a new version of the UI by issuing `npm run build` in the parent dir
//   (or another build command, see package.json in the parent).
// - remove the `dist` directory created by npm to have this build.rs use the
//   GitHub release file instead.
const SRC_DIR: &str = "../dist";
struct Asset {
path: PathBuf,
content: Vec<u8>,
}
struct Assets(Vec<Asset>);
impl Assets {
fn new() -> Self {
Assets(vec![])
}
fn from_tar_gz(&mut self, tar_gz: Vec<u8>) -> io::Result<()> {
let mut archive = Archive::new(GzDecoder::new(tar_gz.as_slice()));
self.0 = archive
.entries()?
.map(move |e| {
let content: &mut Vec<u8> = &mut vec![];
let mut e = e.ok()?;
e.read_to_end(content).ok()?;
if e.size() > 0 {
Some(Asset {
path: e.path().ok()?.to_path_buf(),
content: content.to_owned(),
})
} else {
None
}
})
.filter_map(|e| e)
.collect();
Ok(())
}
fn from_files(&mut self, dir: std::fs::ReadDir) -> io::Result<()> {
for e in dir {
let entry = e?;
let path = entry.path();
if path.is_dir() {
self.from_files(path.read_dir()?)?;
} else {
let mut content_buf: Vec<u8> = vec![];
fs::File::open(entry.path())?.read_to_end(&mut content_buf)?;
self.0.push(Asset {
path: path
.strip_prefix(SRC_DIR)
.map_or_else(|e| Err(io::Error::new(
io::ErrorKind::Other,
format!(
"routinator-ui: Path of Asset file {:?} does not start with /dist: {}",
&path, e
)
)), |p| Ok(p.to_path_buf()))?,
content: content_buf,
});
}
}
Ok(())
}
fn write_to(self, dest_buf: std::cell::RefCell<std::fs::File>) -> io::Result<()> {
dest_buf.borrow_mut().write_all(
r#"mod ui_resources { pub fn endpoints_as_tuple() -> Vec<(&'static str, &'static [u8])> { vec!["#
.as_bytes(),
)?;
for a in self.0 {
add_asset_to_rs_file_from(a.path, &a.content, dest_buf.borrow_mut())?;
}
dest_buf.borrow_mut().write_all("]} }".as_bytes())?;
Ok(())
}
}
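// Usage sketch (hedged; this mirrors the flow in main() below): collect assets
// either from the local `../dist` directory or from the downloaded release
// tarball, then serialize them into the generated Rust source file.
//
//     let mut assets = Assets::new();
//     assets.from_files(fs::read_dir(Path::new(SRC_DIR))?)?;
//     assets.write_to(std::cell::RefCell::new(fs::File::create(&rs_file_path)?))?;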
fn _download_ui_release_build() -> Result<Vec<u8>, reqwest::Error> {
let version = env!("CARGO_PKG_VERSION");
let mut headers = header::HeaderMap::new();
headers.insert(
header::USER_AGENT,
header::HeaderValue::from_str(&format!("User-Agent: routinator-ui/{}", version))
.expect("Cannot download routinator-ui-build."),
);
let client = reqwest::blocking::Client::builder()
.default_headers(headers)
.build()?;
let dl_url = format!("{}/v{}/{}", DL_URL_PATH, version, DL_FILE_NAME);
let tar_gz_res = client.get(&dl_url).send()?;
if !tar_gz_res.status().is_success() {
        eprintln!(
            "routinator-ui: Cannot continue building. Failed to download {}: HTTP status {}.",
            &dl_url,
            tar_gz_res.status()
        );
std::process::exit(1);
}
Ok(tar_gz_res.bytes()?.to_vec())
}
fn add_asset_to_rs_file_from(
src_path: PathBuf,
content_buf: &[u8],
mut ui_buf: std::cell::RefMut<fs::File>,
) -> io::Result<()> {
ui_buf.write_all(format!("(\"{}\",", src_path.to_string_lossy()).as_bytes())?;
    // To shorten content_buf to a smaller slice (so the generated file does
    // not contain the complete file contents, e.g. for debugging), change
    // `&content_buf` below to `&content_buf[..10]`.
    ui_buf.write_all(format!("&{:?}", &content_buf).as_bytes())?;
ui_buf.write_all("),".as_bytes())?;
Ok(())
}
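// For illustration (hypothetical asset): a file `index.html` with bytes
// [60, 104] is emitted as the fragment ("index.html",&[60, 104]), inside the
// vec![...] written by Assets::write_to.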
fn get_out_dir() -> Result<String, std::ffi::OsString> {
env::var_os("OUT_DIR")
.ok_or_else(std::ffi::OsString::new)?
.into_string()
}
fn main() {
    // build.rs gets rerun only if one of these conditions is met:
    // - the cargo version in routinator-ui was bumped.
// PLEASE DO NOT DO THIS MANUALLY, BUT USE `npm version patch|minor|major` INSTEAD.
// See the README for more info.
println!("cargo:rerun-if-env-changed=CARGO_PKG_VERSION");
// This build.rs file was changed.
println!("cargo:rerun-if-changed=build.rs");
    let rs_file_path: std::path::PathBuf;
    if let Ok(out_dir) = get_out_dir() {
        rs_file_path = Path::new(&out_dir).join(RS_FILE_NAME);
    } else {
        panic!("routinator-ui: Cannot continue building. OUT_DIR is not set or is not valid unicode.");
    };
    // Remove the old rs file, if it exists. This will also catch a read-only file system.
if fs::metadata(&rs_file_path).is_ok() {
if let Err(e) = fs::remove_file(&rs_file_path) {
eprintln!(
"routinator-ui: Cannot continue building. Failed to remove file {:?}: {}. Perhaps this is a read-only file system?",
&rs_file_path, e
);
std::process::exit(1);
}
};
// (re)create the rs file output file.
let rs_file_buf = match fs::File::create(&rs_file_path) {
Ok(f) => std::cell::RefCell::new(f),
Err(e) => {
eprintln!(
"routinator-ui: Cannot continue building. Failed to create file {:?}: {}",
&rs_file_path, e
);
std::process::exit(1);
}
};
// If SRC_DIR exists (which means that a local build was made by Vue),
// use that, otherwise download the assets file built by the release action on GitHub.
let mut assets: Assets = Assets::new();
match fs::read_dir(Path::new(SRC_DIR)) {
Ok(dir) => match assets.from_files(dir) {
Ok(_) => {}
Err(e) => {
eprintln!(
"routinator-ui: Cannot continue building. Failed to read local files from '/dist': {}",
e
);
std::process::exit(1);
}
},
Err(_) => match assets.from_tar_gz(_download_ui_release_build().unwrap()) {
Ok(_) => {}
Err(e) => {
eprintln!(
"routinator-ui: Cannot continue building. Failed to download release from github {:?}: {}",
&rs_file_path, e
);
std::process::exit(1);
}
},
}
// flush the assets to disk in a .rs file
match assets.write_to(rs_file_buf) {
Ok(()) => {}
Err(e) => {
eprintln!(
"routinator-ui: Cannot continue building. Failed to write to file {:?}: {}",
&rs_file_path, e
);
std::process::exit(1);
}
}
}
|
ANN_AllAnalysis_ClimateModels_v4-RandomNoise-TestWarmthGFDL.py
|
"""
ANN for evaluating model biases, differences, and other thresholds using
explainable AI (adds warmth/cooling to the GFDL-CM3 model only)
Reference : Barnes et al. [2020, JAMES]
Author : Zachary M. Labe
Date : 20 July 2021
Version : 4 - subsamples random weight class (#8) for mmmean
"""
### Import packages
import sys
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import keras.backend as K
from keras.layers import Dense, Activation
from keras import regularizers
from keras import metrics
from keras import optimizers
from keras.models import Sequential
import tensorflow.keras as keras
import tensorflow as tf
import pandas as pd
import random
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import calc_LRPclass as LRP
import innvestigate
from sklearn.metrics import accuracy_score
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
### Prevent tensorflow 2.+ deprecation warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
### LRP param
DEFAULT_NUM_BWO_ITERATIONS = 200
DEFAULT_BWO_LEARNING_RATE = .001
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
directorydataLLL = '/Users/zlabe/Data/LENS/monthly'
directorydataENS = '/Users/zlabe/Data/SMILE/'
directorydataBB = '/Users/zlabe/Data/BEST/'
directorydataEE = '/Users/zlabe/Data/ERA5/'
directoryoutput = '/Users/zlabe/Documents/Research/ModelComparison/Data/'
###############################################################################
###############################################################################
modelGCMs = ['CCCma_canesm2','MPI','CSIRO_MK3.6','KNMI_ecearth',
'GFDL_CM3','GFDL_ESM2M','lens']
datasetsingle = ['SMILE']
dataset_obs = 'ERA5BE'
seasons = ['annual']
variq = 'T2M'
reg_name = 'LowerArctic'
timeper = 'historical'
###############################################################################
###############################################################################
# pickSMILE = ['CCCma_canesm2','CSIRO_MK3.6','KNMI_ecearth',
# 'GFDL_ESM2M','lens']
# pickSMILE = ['CCCma_canesm2','MPI','lens']
pickSMILE = []
if len(pickSMILE) >= 1:
lenOfPicks = len(pickSMILE)
else:
lenOfPicks = len(modelGCMs)
###############################################################################
###############################################################################
land_only = False
ocean_only = False
if land_only == True:
maskNoiseClass = 'land'
elif ocean_only == True:
maskNoiseClass = 'ocean'
else:
maskNoiseClass = 'none'
###############################################################################
###############################################################################
rm_merid_mean = False
rm_annual_mean = False
###############################################################################
###############################################################################
rm_ensemble_mean = False
rm_observational_mean = False
###############################################################################
###############################################################################
calculate_anomalies = False
if calculate_anomalies == True:
if timeper == 'historical':
baseline = np.arange(1951,1980+1,1)
elif timeper == 'future':
baseline = np.arange(2021,2050+1,1)
else:
        print(ValueError('WRONG TIMEPER!'))
        sys.exit()
###############################################################################
###############################################################################
window = 0
ensTypeExperi = 'ENS'
# shuffletype = 'TIMEENS'
# shuffletype = 'ALLENSRAND'
# shuffletype = 'ALLENSRANDrmmean'
shuffletype = 'RANDGAUSS'
sizeOfTwin = 4 # name of experiment for adding noise class #8
if sizeOfTwin > 0:
sizeOfTwinq = 1
else:
sizeOfTwinq = sizeOfTwin
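### When sizeOfTwin > 0, exactly one extra "random noise" class label is
### appended (sizeOfTwinq collapses to 1; see the classesl construction below).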
###############################################################################
###############################################################################
factorObs = 10 # factor to add to obs
###############################################################################
###############################################################################
if ensTypeExperi == 'ENS':
if window == 0:
rm_standard_dev = False
if timeper == 'historical':
yearsall = np.arange(1950,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
if timeper == 'historical':
yearsall = np.arange(1950+window,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020+window,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravelmodeltime = False
ravel_modelens = True
elif ensTypeExperi == 'GCM':
if window == 0:
rm_standard_dev = False
yearsall = np.arange(1950,2019+1,1)
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
if timeper == 'historical':
yearsall = np.arange(1950,2019+1,1)
elif timeper == 'future':
yearsall = np.arange(2020,2099+1,1)
else:
print(ValueError('WRONG TIMEPER!'))
sys.exit()
ravelmodeltime = False
ravel_modelens = True
###############################################################################
###############################################################################
numOfEns = 16
lensalso = True
if len(pickSMILE) == 0:
if modelGCMs[-1] == 'RANDOM':
randomalso = True
else:
randomalso = False
elif len(pickSMILE) != 0:
if pickSMILE[-1] == 'RANDOM':
randomalso = True
else:
randomalso = False
lentime = len(yearsall)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
num_of_class = lenOfPicks + sizeOfTwinq
###############################################################################
###############################################################################
lrpRule = 'z'
normLRP = True
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Picking experiment to save
typeOfAnalysis = 'issueWithExperiment'
# Experiment #1
if rm_ensemble_mean == True:
if window > 1:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-1'
# Experiment #2
if rm_ensemble_mean == True:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-2'
# Experiment #3 (raw data)
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-3'
if variq == 'T2M':
integer = 20 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 20 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 20 # random noise value to add/subtract from each grid point
# Experiment #4
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-4'
if variq == 'T2M':
integer = 25 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 15 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 5 # random noise value to add/subtract from each grid point
# Experiment #5
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-5'
# Experiment #6
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == False:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == True:
typeOfAnalysis = 'Experiment-6'
# Experiment #7
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == True:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-7'
# Experiment #8
if rm_ensemble_mean == False:
if window == 0:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-8'
if variq == 'T2M':
integer = 1 # random noise value to add/subtract from each grid point
elif variq == 'P':
integer = 1 # random noise value to add/subtract from each grid point
elif variq == 'SLP':
integer = 5 # random noise value to add/subtract from each grid point
# Experiment #9
if rm_ensemble_mean == False:
if window > 1:
if calculate_anomalies == True:
if rm_merid_mean == False:
if rm_observational_mean == False:
if rm_annual_mean == False:
typeOfAnalysis = 'Experiment-9'
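### Hedged summary of the experiment matrix above (a restatement for
### readability; the conditionals above remain authoritative):
###   Exp-1: rm_ensemble_mean, window > 1      Exp-2: rm_ensemble_mean, window == 0
###   Exp-3: raw data (all flags False)        Exp-4: rm_annual_mean only
###   Exp-5: rm_observational_mean only        Exp-6: rm_observational_mean + rm_annual_mean
###   Exp-7: anomalies + rm_observational_mean Exp-8: anomalies only
###   Exp-9: anomalies with window > 1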
print('\n<<<<<<<<<<<< Analysis == %s (%s) ! >>>>>>>>>>>>>>>\n' % (typeOfAnalysis,timeper))
if typeOfAnalysis == 'issueWithExperiment':
sys.exit('Wrong parameters selected to analyze')
### Select how to save files
if land_only == True:
saveData = timeper + '_' + seasons[0] + '_LAND' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
elif ocean_only == True:
saveData = timeper + '_' + seasons[0] + '_OCEAN' + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
else:
saveData = timeper + '_' + seasons[0] + '_NoiseTwinSingleMODDIF4_AddingWARMTH-toGFDL%s_' % (factorObs) + typeOfAnalysis + '_' + variq + '_' + reg_name + '_' + dataset_obs + '_' + 'NumOfSMILE-' + str(num_of_class) + '_Method-' + ensTypeExperi
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Create sample class labels for each model for my own testing
### Appends a twin set of classes for the random noise class
if seasons != 'none':
classesl = np.empty((lenOfPicks,numOfEns,len(yearsall)))
for i in range(lenOfPicks):
classesl[i,:,:] = np.full((numOfEns,len(yearsall)),i)
if sizeOfTwin > 0:
### Add random noise models
randomNoiseClass = np.full((sizeOfTwinq,numOfEns,len(yearsall)),i+1)
classesl = np.append(classesl,randomNoiseClass,axis=0)
if ensTypeExperi == 'ENS':
classeslnew = np.swapaxes(classesl,0,1)
elif ensTypeExperi == 'GCM':
classeslnew = classesl
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Begin ANN and the entire script
for sis,singlesimulation in enumerate(datasetsingle):
lrpsns = []
for seas in range(len(seasons)):
###############################################################################
###############################################################################
###############################################################################
### ANN preliminaries
simuqq = datasetsingle[0]
monthlychoice = seasons[seas]
lat_bounds,lon_bounds = UT.regions(reg_name)
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
experiment_result = pd.DataFrame(columns=['actual iters','hiddens','cascade',
'RMSE Train','RMSE Test',
'ridge penalty','zero mean',
'zero merid mean','land only?','ocean only?'])
### Define primary dataset to use
dataset = singlesimulation
modelType = dataset
### Whether to test and plot the results using obs data
if dataset_obs == '20CRv3':
year_obsall = np.arange(yearsall[sis].min(),2015+1,1)
elif dataset_obs == 'ERA5':
year_obsall = np.arange(1979+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1979,2019+1,1)
elif dataset_obs == 'ERA5BE':
year_obsall = np.arange(1950+window,2019+1,1)
if rm_standard_dev == False:
year_obsall = np.arange(1950,2019+1,1)
if monthlychoice == 'DJF':
obsyearstart = year_obsall.min()+1
year_obs = year_obsall[1:]
else:
obsyearstart = year_obsall.min()
year_obs = year_obsall
### Remove the annual mean? True to subtract it from dataset ##########
if rm_annual_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
### Remove the ensemble mean? True to subtract it from dataset ##########
if rm_ensemble_mean == True:
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/'
### Split the data into training and testing sets? A value of 1 will use all
### data as training
segment_data_factor = .75
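### Illustrative arithmetic (assumed numbers, not from this script): with
### segment_data_factor = 0.75 and 16 ensemble members, segment_data() below
### trains on round(16*0.75) = 12 members and tests on the remaining 4.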
### Hiddens corresponds to the number of hidden layers the nnet will use - 0
### for linear model, or a list [10, 20, 5] for multiple layers of nodes
### (10 nodes in first layer, 20 in second, etc); The "loop" part
### allows you to loop through multiple architectures. For example,
### hiddens_loop = [[2,4],[0],[1,1,1]] would produce three separate NNs, the
### first with 2 hidden layers of 2 and 4 nodes, the next the linear model,
### and the next would be 3 hidden layers of 1 node each.
### Set useGPU to True to use the GPU, but only if you selected the GPU
### Runtime in the menu at the top of this page
useGPU = False
### Set Cascade to True to utilize the nnet's cascade function
cascade = False
### Plot within the training loop - may want to set to False when testing out
### larger sets of parameters
plot_in_train = False
###############################################################################
###############################################################################
###############################################################################
### Read in model and observational/reanalysis data
def read_primary_dataset(variq,dataset,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,
lat_bounds,lon_bounds)
print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)
return data_obs,lats_obs,lons_obs
###############################################################################
###############################################################################
###############################################################################
### Select data to test, train on
def segment_data(data,classesl,ensTypeExperi,fac = segment_data_factor):
global random_segment_seed,trainIndices,testIndices
if random_segment_seed is None:
    random_segment_seed = int(np.random.randint(1, 100000))
np.random.seed(random_segment_seed)
###############################################################################
###############################################################################
###############################################################################
###################################################################
### Large Ensemble experiment
if ensTypeExperi == 'ENS':
### Flip GCM and ensemble member axes
datanew = np.swapaxes(data,0,1)
classeslnew = np.swapaxes(classesl,0,1)
if fac < 1:
nrows = datanew.shape[0]
segment_train = int(np.round(nrows * fac))
segment_test = nrows - segment_train
print('Training on',segment_train,'ensembles, testing on',segment_test)
### Picking out random ensembles
i = 0
trainIndices = list()
while i < segment_train:
line = np.random.randint(0, nrows)
if line not in trainIndices:
trainIndices.append(line)
i += 1
else:
pass
i = 0
testIndices = list()
while i < segment_test:
line = np.random.randint(0, nrows)
if line not in trainIndices:
if line not in testIndices:
testIndices.append(line)
i += 1
else:
pass
### Training segment----------
data_train = np.empty((len(trainIndices),datanew.shape[1],
datanew.shape[2],datanew.shape[3],
datanew.shape[4]))
Ytrain = np.empty((len(trainIndices),classeslnew.shape[1],
classeslnew.shape[2]))
for index,ensemble in enumerate(trainIndices):
data_train[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
Ytrain[index,:,:] = classeslnew[ensemble,:,:]
### Random ensembles are picked
if debug:
print('\nTraining on ensembles: ',trainIndices)
print('Testing on ensembles: ',testIndices)
print('\norg data - shape', datanew.shape)
print('training data - shape', data_train.shape)
### Reshape into X and Y
Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]*data_train.shape[2]),(data_train.shape[3]*data_train.shape[4]))
Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]*Ytrain.shape[2]))
Xtrain_shape = (data_train.shape[0])
### Testing segment----------
data_test = np.empty((len(testIndices),datanew.shape[1],
datanew.shape[2],datanew.shape[3],
datanew.shape[4]))
Ytest = np.empty((len(testIndices),classeslnew.shape[1],
classeslnew.shape[2]))
for index,ensemble in enumerate(testIndices):
data_test[index,:,:,:,:] = datanew[ensemble,:,:,:,:]
Ytest[index,:,:] = classeslnew[ensemble,:,:]
### Random ensembles are picked
if debug:
print('Training on ensembles: %s' % len(trainIndices))
print('Testing on ensembles: %s' % len(testIndices))
print('\norg data - shape', datanew.shape)
print('testing data - shape', data_test.shape)
### Reshape into X and Y
Xtest = data_test.reshape((data_test.shape[0]*data_test.shape[1]*data_test.shape[2]),(data_test.shape[3]*data_test.shape[4]))
Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]*Ytest.shape[2]))
Xtest_shape = (data_test.shape[0], data_test.shape[1])
data_train_shape = data_train.shape[0]
data_test_shape = data_test.shape[0]
### 'unlock' the random seed
np.random.seed(None)
### One-hot vectors
Ytrain = keras.utils.to_categorical(Ytrain)
Ytest = keras.utils.to_categorical(Ytest)
### Class weights
class_weight = class_weight_creator(Ytrain)
###############################################################################
###############################################################################
###############################################################################
###################################################################
### GCM type experiments without ensembles
elif ensTypeExperi == 'GCM':
if data.ndim == 5:
datanew = np.reshape(data,(data.shape[0]*data.shape[1],data.shape[2],data.shape[3],data.shape[4]))
classeslnew = np.reshape(classesl,(classesl.shape[0]*classesl.shape[1],classesl.shape[2]))
else:
datanew = data
classeslnew = classesl
if fac < 1:
nrows = datanew.shape[1]
segment_train = int(np.floor(nrows * fac))
segment_test = nrows - segment_train
print('Training on',segment_train,'years, testing on',segment_test)
### Picking out random ensembles
firstyears = int(np.floor(segment_test/2))
lastyears = -int(np.floor(segment_test/2))
trainIndices = np.arange(firstyears,firstyears+segment_train,1)
testIndices = np.append(np.arange(firstyears),np.arange(trainIndices[-1]+1,nrows,1),axis=0)
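### Hedged walk-through with assumed numbers: if nrows = 100 years and
### fac = 0.75, then segment_train = 75, segment_test = 25, and
### firstyears = 12, so trainIndices covers years 12..86 (the middle block)
### and testIndices covers years 0..11 plus 87..99 (the two outer blocks).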
### Training segment----------
data_train = np.empty((datanew.shape[0],len(trainIndices),
datanew.shape[2],datanew.shape[3]))
Ytrain = np.empty((classeslnew.shape[0],len(trainIndices)))
for index,ensemble in enumerate(trainIndices):
data_train[:,index,:,:] = datanew[:,ensemble,:,:]
Ytrain[:,index] = classeslnew[:,ensemble]
### Random ensembles are picked
if debug:
print('\nTraining on years: ',trainIndices)
print('Testing on years: ',testIndices)
print('\norg data - shape', datanew.shape)
print('training data - shape', data_train.shape)
### Reshape into X and Y
Xtrain = data_train.reshape((data_train.shape[0]*data_train.shape[1]),(data_train.shape[2]*data_train.shape[3]))
Ytrain = Ytrain.reshape((Ytrain.shape[0]*Ytrain.shape[1]))
Xtrain_shape = (data_train.shape[0])
### Testing segment----------
data_test = np.empty((datanew.shape[0],len(testIndices),
datanew.shape[2],datanew.shape[3]))
Ytest = np.empty((classeslnew.shape[0],len(testIndices)))
for index,ensemble in enumerate(testIndices):
data_test[:,index,:,:] = datanew[:,ensemble,:,:]
Ytest[:,index] = classeslnew[:,ensemble]
### Random ensembles are picked
if debug:
print('Training on years: %s' % len(trainIndices))
print('Testing on years: %s' % len(testIndices))
print('\norg data - shape', datanew.shape)
print('testing data - shape', data_test.shape)
### Reshape into X and Y
Xtest = data_test.reshape((data_test.shape[0]*data_test.shape[1]),(data_test.shape[2]*data_test.shape[3]))
Ytest = Ytest.reshape((Ytest.shape[0]*Ytest.shape[1]))
Xtest_shape = (data_test.shape[0], data_test.shape[1])
data_train_shape = data_train.shape[0]
data_test_shape = data_test.shape[0]
### 'unlock' the random seed
np.random.seed(None)
### One-hot vectors
Ytrain = keras.utils.to_categorical(Ytrain)
Ytest = keras.utils.to_categorical(Ytest)
### Class weights
class_weight = class_weight_creator(Ytrain)
else:
raise ValueError('WRONG EXPERIMENT!')
return Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight
###############################################################################
###############################################################################
###############################################################################
### Plotting functions
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
###############################################################################
###############################################################################
###############################################################################
### Create a class weight dictionary to help if the classes are unbalanced
def class_weight_creator(Y):
class_dict = {}
weights = np.max(np.sum(Y, axis=0)) / np.sum(Y, axis=0)
for i in range( Y.shape[-1] ):
class_dict[i] = weights[i]
return class_dict
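### Minimal sketch of class_weight_creator (hypothetical input): for a
### one-hot Y whose column sums are [300, 100, 100], weights = 300/[300,100,100]
### = [1.0, 3.0, 3.0], so the function returns {0: 1.0, 1: 3.0, 2: 3.0} and
### rarer classes are upweighted during training.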
###############################################################################
###############################################################################
###############################################################################
### Neural Network Creation & Training
class TimeHistory(keras.callbacks.Callback):
|
self.epoch_time_start = time.time()
def on_epoch_end(self, epoch, logs={}):
self.times.append(time.time() - self.epoch_time_start)
def defineNN(hidden, input_shape, output_shape, ridgePenalty):
model = Sequential()
### Initialize first layer
### Model is a single node with activation function
model.add(Dense(hidden[0],input_shape=(input_shape,),
activation=actFun, use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=ridgePenalty),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
### Initialize other layers
for layer in hidden[1:]:
model.add(Dense(layer,activation=actFun,
use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00,l2=0.00),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
print('\nTHIS IS AN ANN!\n')
#### Initialize output layer
model.add(Dense(output_shape,activation=None,use_bias=True,
kernel_regularizer=regularizers.l1_l2(l1=0.00, l2=0.00),
bias_initializer=keras.initializers.RandomNormal(seed=random_network_seed),
kernel_initializer=keras.initializers.RandomNormal(seed=random_network_seed)))
### Add softmax layer at the end
model.add(Activation('softmax'))
return model
def trainNN(model, Xtrain, Ytrain, niter, class_weight, verbose):
global lr_here, batch_size
lr_here = 0.001
model.compile(optimizer=optimizers.SGD(lr=lr_here,
momentum=0.9,nesterov=True),
loss = 'categorical_crossentropy',
metrics=[metrics.categorical_accuracy])
# model.compile(optimizer=optimizers.Nadam(lr=lr_here),
# loss = 'categorical_crossentropy',
# metrics=[metrics.categorical_accuracy])
### Declare the relevant model parameters
batch_size = 24
print('----ANN Training: learning rate = '+str(lr_here)+'; activation = '+actFun+'; batch = '+str(batch_size) + '----')
### Callbacks
time_callback = TimeHistory()
early_stopping = keras.callbacks.EarlyStopping(monitor='loss',
patience=2,
verbose=1,
mode='auto')
history = model.fit(Xtrain,Ytrain,batch_size=batch_size,epochs=niter,
shuffle=True,verbose=verbose,
callbacks=[time_callback,early_stopping],
validation_split=0.)
print('******** done training ***********')
return model, history
def test_train_loopClass(Xtrain,Ytrain,Xtest,Ytest,iterations,ridge_penalty,hiddens,class_weight,plot_in_train=True):
"""or loops to iterate through training iterations, ridge penalty,
and hidden layer list
"""
results = {}
global nnet,random_network_seed
for niter in iterations:
for penalty in ridge_penalty:
for hidden in hiddens:
### Check / use random seed
if random_network_seed is None:
np.random.seed(None)
random_network_seed = int(np.random.randint(1, 100000))
np.random.seed(random_network_seed)
random.seed(random_network_seed)
tf.set_random_seed(0)
### Standardize the data
Xtrain,Xtest,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean,Xstd = stdVals
### Define the model
model = defineNN(hidden,
input_shape=np.shape(Xtrain)[1],
output_shape=np.shape(Ytrain)[1],
ridgePenalty=penalty)
### Train the net
model, history = trainNN(model,Xtrain,
Ytrain,niter,class_weight,verbose=1)
### After training, use the network with training data to
### check that we don't have any errors and output RMSE
rmse_train = dSS.rmse(Ytrain,model.predict(Xtrain))
if not isinstance(Ytest, bool):
    rmse_test = dSS.rmse(Ytest,model.predict(Xtest))
else:
rmse_test = False
this_result = {'iters': niter,
'hiddens' : hidden,
'RMSE Train' : rmse_train,
'RMSE Test' : rmse_test,
'ridge penalty': penalty,
'zero mean' : rm_annual_mean,
'zero merid mean' : rm_merid_mean,
'land only?' : land_only,
'ocean only?' : ocean_only,
'Segment Seed' : random_segment_seed,
'Network Seed' : random_network_seed }
results.update(this_result)
global experiment_result
experiment_result = experiment_result.append(results,
ignore_index=True)
# If True, plot each iteration's graphs.
if plot_in_train == True:
plt.figure()
plt.subplot(1,1,1)
plt.plot(history.history['loss'],label = 'training')
plt.title(history.history['loss'][-1])
plt.xlabel('epoch')
plt.xlim(2,len(history.history['loss'])-1)
plt.legend()
plt.grid(True)
plt.show()
#'unlock' the random seed
np.random.seed(None)
random.seed(None)
tf.set_random_seed(None)
return experiment_result, model
###############################################################################
###############################################################################
###############################################################################
### Results
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
### Parameters
debug = True
NNType = 'ANN'
avgHalfChunk = 0
option4 = True
biasBool = False
hiddensList = [[10,10]]
ridge_penalty = [0.1]
# hiddensList = [[8,8]]
# ridge_penalty = [0.2]
actFun = 'relu'
if maskNoiseClass in ('land','ocean'):
debug = True
NNType = 'ANN'
avgHalfChunk = 0
option4 = True
biasBool = False
hiddensList = [[8,8]]
ridge_penalty = [0.10]
actFun = 'relu'
expList = [(0)] # (0,1)
expN = np.size(expList)
iterations = [100]
random_segment = True
foldsN = 1
for avgHalfChunk in (0,):
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
K.clear_session()
for loop in ([0]):
### Get info about the region
lat_bounds,lon_bounds = UT.regions(reg_name)
data_all,lats,lons = read_primary_dataset(variq,dataset,
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
data_obs_all,lats_obs,lons_obs = read_obs_dataset(variq,
dataset_obs,
numOfEns,
lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
###############################################################################
###############################################################################
###############################################################################
for exp in expList:
### Get the data together
data, data_obs = data_all, data_obs_all
###############################################################################
if len(pickSMILE) >= 1:
data = dSS.pickSmileModels(data,modelGCMs,pickSMILE)
print('\n*Pick models to analyze from %s*\n' % pickSMILE)
###############################################################################
if calculate_anomalies == True:
data, data_obs = dSS.calculate_anomalies(data,data_obs,
lats,lons,baseline,yearsall)
print('\n*Calculate anomalies for %s-%s*\n' % (baseline.min(),baseline.max()))
###############################################################################
if rm_annual_mean == True:
data, data_obs = dSS.remove_annual_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed annual mean*\n')
###############################################################################
if rm_merid_mean == True:
data, data_obs = dSS.remove_merid_mean(data,data_obs,
lats,lons,
lats_obs,lons_obs)
print('\n*Removed meridional mean*\n')
###############################################################################
if rm_ensemble_mean == True:
data = dSS.remove_ensemble_mean(data,ravel_modelens,
ravelmodeltime,
rm_standard_dev,
numOfEns)
print('\n*Removed ensemble mean*')
###############################################################################
if rm_standard_dev == True:
data = dSS.rm_standard_dev(data,window,ravelmodeltime,
numOfEns)
print('\n*Removed standard deviation*')
###############################################################################
if rm_observational_mean == True:
data = dSS.remove_observations_mean(data,data_obs,lats,lons)
print('\n*Removed observational data*')
###############################################################################
if land_only == True:
data, data_obs = dSS.remove_ocean(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed ocean data*')
###############################################################################
if ocean_only == True:
data, data_obs = dSS.remove_land(data,data_obs,
lat_bounds,
lon_bounds)
print('\n*Removed land data*')
###############################################################################
### Adding random data
if sizeOfTwin > 0:
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
data = dSS.addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Modify the GFDL-CM3 model for warmth and cooling that model only
print('\n <<< FACTOR FOR OBS IS %s! >>>\n' % factorObs)
if factorObs == 0:
data = data
elif factorObs == 1: # warm its mean state
GFDL = data[4,:,:,:,:]
GFDLwarmer = GFDL + 3
data[4,:,:,:,:] = GFDLwarmer
elif factorObs == 2: # cool its mean state
GFDL = data[4,:,:,:,:]
GFDLcooler = GFDL - 3
data[4,:,:,:,:] = GFDLcooler
elif factorObs == 3: # warm recent 10 years
GFDL = data[4,:,:,:,:]
GFDLbefore = GFDL[:,:-10,:,:]
GFDLafter = GFDL[:,-10:,:,:] + 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 4: # cool recent 10 years
GFDL = data[4,:,:,:,:]
GFDLbefore = GFDL[:,:-10,:,:]
GFDLafter = GFDL[:,-10:,:,:] - 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 5: # warm the North Pole
sizeofNP = 10
GFDL = data[4,:,:,:,:]
warmerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) + 5
addtoclimoNP = GFDL[:,:,sizeofNP:,:] + warmerNP
GFDL[:,:,sizeofNP:,:] = addtoclimoNP
data[4,:,:,:,:] = GFDL
elif factorObs == 6: # cool the North Pole
sizeofNP = 10
GFDL = data[4,:,:,:,:]
coolerNP = np.zeros((GFDL.shape[0],GFDL.shape[1],GFDL.shape[2]-sizeofNP,GFDL.shape[3])) - 5
addtoclimoNP = GFDL[:,:,sizeofNP:,:] + coolerNP
GFDL[:,:,sizeofNP:,:] = addtoclimoNP
data[4,:,:,:,:] = GFDL
elif factorObs == 7: # warm the Lower Arctic
sizeofLA = 5
GFDL = data[4,:,:,:,:]
warmerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) + 5
addtoclimoLA = GFDL[:,:,:sizeofLA,:] + warmerLA
GFDL[:,:,:sizeofLA,:] = addtoclimoLA
data[4,:,:,:,:] = GFDL
elif factorObs == 8: # cool the Lower Arctic
sizeofLA = 5
GFDL = data[4,:,:,:,:]
coolerLA = np.zeros((GFDL.shape[0],GFDL.shape[1],sizeofLA,GFDL.shape[3])) - 5
addtoclimoLA = GFDL[:,:,:sizeofLA,:] + coolerLA
GFDL[:,:,:sizeofLA,:] = addtoclimoLA
data[4,:,:,:,:] = GFDL
elif factorObs == 9: # warm early 50 years
GFDL = data[4,:,:,:,:]
GFDLafter = GFDL[:,50:,:,:]
GFDLbefore = GFDL[:,:50,:,:] + 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
elif factorObs == 10: # cool early 50 years
GFDL = data[4,:,:,:,:]
GFDLafter = GFDL[:,50:,:,:]
GFDLbefore = GFDL[:,:50,:,:] - 3
GFDLq = np.append(GFDLbefore,GFDLafter,axis=1)
data[4,:,:,:,:] = GFDLq
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Loop over folds
for loop in np.arange(0,foldsN):
K.clear_session()
#---------------------------
# random_segment_seed = 34515
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/ModelComparison/Data/SelectedSegmentSeed.txt',unpack=True))
#---------------------------
Xtrain,Ytrain,Xtest,Ytest,Xtest_shape,Xtrain_shape,data_train_shape,data_test_shape,testIndices,trainIndices,class_weight = segment_data(data,classesl,ensTypeExperi,segment_data_factor)
YtrainClassMulti = Ytrain
YtestClassMulti = Ytest
# For use later
XtrainS,XtestS,stdVals = dSS.standardize_data(Xtrain,Xtest)
Xmean, Xstd = stdVals
#---------------------------
random_network_seed = 87750
#---------------------------
# Create and train network
exp_result,model = test_train_loopClass(Xtrain,
YtrainClassMulti,
Xtest,
YtestClassMulti,
iterations=iterations,
ridge_penalty=ridge_penalty,
hiddens=hiddensList,class_weight=class_weight,
plot_in_train = True)
model.summary()
################################################################################################################################################
# save the model
dirname = '/Users/zlabe/Desktop/ModelComparison_v1/'
savename = modelType+'_'+variq+'_kerasMultiClassBinaryOption4'+'_' + NNType + '_L2_'+ str(ridge_penalty[0])+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(iterations[0]) + '_' + str(hiddensList[0][0]) + 'x' + str(hiddensList[0][-1]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
savenameModelTestTrain = modelType+'_'+variq+'_modelTrainTest_SegSeed'+str(random_segment_seed)+'_NetSeed'+str(random_network_seed)
if reg_name == 'Globe':
regSave = ''
else:
regSave = '_' + reg_name
if rm_annual_mean == True:
savename = savename + '_AnnualMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_AnnualMeanRemoved'
if rm_ensemble_mean == True:
savename = savename + '_EnsembleMeanRemoved'
savenameModelTestTrain = savenameModelTestTrain + '_EnsembleMeanRemoved'
savename = savename + regSave
# model.save(dirname + savename + '.h5')
# np.savez(dirname + savenameModelTestTrain + '.npz',trainModels=trainIndices,testModels=testIndices,Xtrain=Xtrain,Ytrain=Ytrain,Xtest=Xtest,Ytest=Ytest,Xmean=Xmean,Xstd=Xstd,lats=lats,lons=lons)
print('saving ' + savename)
###############################################################
### Make final plot
### Get obs
dataOBSERVATIONS = data_obs
latsOBSERVATIONS = lats_obs
lonsOBSERVATIONS = lons_obs
Xobs = dataOBSERVATIONS.reshape(dataOBSERVATIONS.shape[0],dataOBSERVATIONS.shape[1]*dataOBSERVATIONS.shape[2])
annType = 'class'
if monthlychoice == 'DJF':
startYear = yearsall[sis].min()+1
endYear = yearsall[sis].max()
else:
startYear = yearsall[sis].min()
endYear = yearsall[sis].max()
years = np.arange(startYear,endYear+1,1)
Xmeanobs = np.nanmean(Xobs,axis=0)
Xstdobs = np.nanstd(Xobs,axis=0)
XobsS = (Xobs-Xmeanobs)/Xstdobs
XobsS[np.isnan(XobsS)] = 0
xtrainpred = (Xtrain-Xmean)/Xstd
xtrainpred[np.isnan(xtrainpred)] = 0
xtestpred = (Xtest-Xmean)/Xstd
xtestpred[np.isnan(xtestpred)] = 0
if annType == 'class':
YpredObs = model.predict(XobsS)
YpredTrain = model.predict(xtrainpred)
YpredTest = model.predict(xtestpred)
#######################################################
#######################################################
#######################################################
### Check null hypothesis of random data!
randarray,latsra,lonsra = read_primary_dataset(variq,'RANDOM',
numOfEns,lensalso,
randomalso,
ravelyearsbinary,
ravelbinary,
shuffletype,
lat_bounds,
lon_bounds)
randarrayn = randarray.reshape(randarray.shape[0],randarray.shape[1]*randarray.shape[2])
randarraymean = np.nanmean(randarrayn,axis=0)
randarraystd = np.nanstd(randarrayn,axis=0)
randarrayS = (randarrayn-randarraymean)/randarraystd
### Prediction on random data
YpredRand = model.predict(randarrayS)
#######################################################
#######################################################
#######################################################
### Get output from model
trainingout = YpredTrain
testingout = YpredTest
if ensTypeExperi == 'ENS':
classesltrain = classeslnew[trainIndices,:,:].ravel()
classesltest = classeslnew[testIndices,:,:].ravel()
elif ensTypeExperi == 'GCM':
classesltrain = classeslnew[:,:,trainIndices].ravel()
classesltest = classeslnew[:,:,testIndices].ravel()
### Random data tests
randout = YpredRand
labelsrand = np.argmax(randout,axis=1)
uniquerand,countrand = np.unique(labelsrand,return_counts=True)
np.savetxt(directoryoutput + 'RandLabels_' + saveData + '.txt',labelsrand)
np.savetxt(directoryoutput + 'RandConfid_' + saveData + '.txt',randout)
### Observations
obsout = YpredObs
labelsobs = np.argmax(obsout,axis=1)
uniqueobs,countobs = np.unique(labelsobs,return_counts=True)
print(labelsobs)
np.savetxt(directoryoutput + 'obsLabels_' + saveData + '.txt',labelsobs)
np.savetxt(directoryoutput + 'obsConfid_' + saveData + '.txt',obsout)
def truelabel(data):
"""
Calculate argmax
"""
maxindexdata = np.argmax(data[:,:],axis=1)
return maxindexdata
def accuracyTotalTime(data_pred,data_true):
"""
Compute accuracy for the entire time series
"""
data_truer = data_true
data_predr = data_pred
accdata_pred = accuracy_score(data_truer,data_predr)
return accdata_pred
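### Hedged example of the two helpers above (made-up numbers): for softmax
### rows [[0.7,0.2,0.1],[0.1,0.8,0.1]], truelabel() returns [0, 1];
### accuracyTotalTime() then scores those argmax labels against the true
### labels with sklearn's accuracy_score, e.g. predictions [0,1] against
### truth [0,2] give an accuracy of 0.5.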
##############################################################################
##############################################################################
##############################################################################
indextrain = truelabel(trainingout)
acctrain = accuracyTotalTime(indextrain,classesltrain)
indextest = truelabel(testingout)
acctest = accuracyTotalTime(indextest,classesltest)
print('\n\nAccuracy Training == ',acctrain)
print('Accuracy Testing == ',acctest)
## Save the output for plotting
np.savetxt(directoryoutput + 'trainingEnsIndices_' + saveData + '.txt',trainIndices)
np.savetxt(directoryoutput + 'testingEnsIndices_' + saveData + '.txt',testIndices)
np.savetxt(directoryoutput + 'trainingTrueLabels_' + saveData + '.txt',classesltrain)
np.savetxt(directoryoutput + 'testingTrueLabels_' + saveData + '.txt',classesltest)
np.savetxt(directoryoutput + 'trainingPredictedLabels_' + saveData + '.txt',indextrain)
np.savetxt(directoryoutput + 'testingPredictedLabels_' + saveData + '.txt',indextest)
### See more details
model.layers[0].get_config()
## Define variable for analysis
print('\n\n------------------------')
print(variq,'= Variable!')
print(monthlychoice,'= Time!')
print(reg_name,'= Region!')
print(lat_bounds,lon_bounds)
print(dataset,'= Model!')
print(dataset_obs,'= Observations!\n')
print(rm_annual_mean,'= rm_annual_mean')
print(rm_merid_mean,'= rm_merid_mean')
print(rm_ensemble_mean,'= rm_ensemble_mean')
print(land_only,'= land_only')
print(ocean_only,'= ocean_only')
## Variables for plotting
lons2,lats2 = np.meshgrid(lons,lats)
observations = data_obs
modeldata = data
modeldatamean = np.nanmean(modeldata,axis=1)
spatialmean_obs = UT.calc_weightedAve(observations,lats2)
spatialmean_mod = UT.calc_weightedAve(modeldata,lats2)
spatialmean_modmean = np.nanmean(spatialmean_mod,axis=1)
plt.figure()
plt.plot(yearsall,spatialmean_modmean.transpose())
plt.plot(yearsall,spatialmean_modmean.transpose()[:,4],linewidth=3,color='red',label=r'GFDL-CM3 - %s-Experiment' % factorObs)
plt.xlabel('Years')
plt.ylabel('Average Arctic Temperature')
plt.legend()
plt.ylim([-14.5,-1])
plt.savefig('/Users/zlabe/Desktop/factor-%s.png' % factorObs,dpi=300)
plt.figure()
plt.plot(spatialmean_obs)
##############################################################################
##############################################################################
##############################################################################
## Visualizing through LRP
numLats = lats.shape[0]
numLons = lons.shape[0]
numDim = 3
##############################################################################
##############################################################################
##############################################################################
lrpall = LRP.calc_LRPModel(model,np.append(XtrainS,XtestS,axis=0),
np.append(Ytrain,Ytest,axis=0),
biasBool,annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
meanlrp = np.nanmean(lrpall,axis=0)
fig=plt.figure()
plt.contourf(meanlrp,300,cmap=cmocean.cm.thermal)
### For training data only
lrptrain = LRP.calc_LRPModel(model,XtrainS,Ytrain,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For training data only
lrptest = LRP.calc_LRPModel(model,XtestS,Ytest,biasBool,
annType,num_of_class,
yearsall,lrpRule,normLRP,
numLats,numLons,numDim)
### For observations data only
lrpobservations = LRP.calc_LRPObs(model,XobsS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
### For random data only
lrprandom = LRP.calc_LRPObs(model,randarrayS,biasBool,annType,
num_of_class,yearsall,lrpRule,
normLRP,numLats,numLons,numDim)
##############################################################################
##############################################################################
##############################################################################
def netcdfLRP(lats,lons,var,directory,typemodel,saveData):
print('\n>>> Using netcdfLRP function!')
from netCDF4 import Dataset
import numpy as np
name = 'LRPMap' + typemodel + '_' + saveData + '.nc'
filename = directory + name
ncfile = Dataset(filename,'w',format='NETCDF4')
ncfile.description = 'LRP maps for using selected seed'
### Dimensions
ncfile.createDimension('years',var.shape[0])
ncfile.createDimension('lat',var.shape[1])
ncfile.createDimension('lon',var.shape[2])
### Variables
years = ncfile.createVariable('years','f4',('years'))
latitude = ncfile.createVariable('lat','f4',('lat'))
longitude = ncfile.createVariable('lon','f4',('lon'))
varns = ncfile.createVariable('LRP','f4',('years','lat','lon'))
### Units
varns.units = 'unitless relevance'
ncfile.title = 'LRP relevance'
ncfile.institution = 'Colorado State University'
ncfile.references = 'Barnes et al. [2020]'
### Data
years[:] = np.arange(var.shape[0])
latitude[:] = lats
longitude[:] = lons
varns[:] = var
ncfile.close()
print('*Completed: Created netCDF4 File!')
netcdfLRP(lats,lons,lrpall,directoryoutput,'AllData',saveData)
netcdfLRP(lats,lons,lrptrain,directoryoutput,'Training',saveData)
netcdfLRP(lats,lons,lrptest,directoryoutput,'Testing',saveData)
netcdfLRP(lats,lons,lrpobservations,directoryoutput,'Obs',saveData)
|
def on_train_begin(self, logs={}):
self.times = []
def on_epoch_begin(self, epoch, logs={}):
|
resource_iteration_permissions_test.go
|
//go:build (all || permissions || resource_iteration_permissions) && (!exclude_permissions || !exclude_resource_iteration_permissions)
// +build all permissions resource_iteration_permissions
// +build !exclude_permissions !exclude_resource_iteration_permissions
package acceptancetests
import (
"fmt"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/microsoft/terraform-provider-azuredevops/azuredevops/internal/acceptancetests/testutils"
"github.com/microsoft/terraform-provider-azuredevops/azuredevops/internal/utils/datahelper"
)
func hclIterationPermissions(projectName string, permissions map[string]map[string]string) string {
rootPermissions := datahelper.JoinMap(permissions["root"], "=", "\n")
iterationPermissions := datahelper.JoinMap(permissions["iteration"], "=", "\n")
return fmt.Sprintf(`
%s
data "azuredevops_group" "tf-project-readers" {
project_id = azuredevops_project.project.id
name = "Readers"
}
resource "azuredevops_iteration_permissions" "root-permissions" {
project_id = azuredevops_project.project.id
principal = data.azuredevops_group.tf-project-readers.id
permissions = {
%s
}
}
resource "azuredevops_iteration_permissions" "iteration-permissions" {
project_id = azuredevops_project.project.id
principal = data.azuredevops_group.tf-project-readers.id
path = "Iteration 1"
permissions = {
%s
}
}
`, testutils.HclProjectResource(projectName), rootPermissions, iterationPermissions)
}
func TestAccIterationPermissions_SetPermissions(t *testing.T) {
projectName := testutils.GenerateResourceName()
config := hclIterationPermissions(projectName, map[string]map[string]string{
"root": {
"CREATE_CHILDREN": "Deny",
"GENERIC_READ": "NotSet",
"DELETE": "Deny",
},
"iteration": {
"CREATE_CHILDREN": "Allow",
"GENERIC_READ": "NotSet",
"DELETE": "Allow",
},
})
tfNodeRoot := "azuredevops_iteration_permissions.root-permissions"
tfNodeIteration := "azuredevops_iteration_permissions.iteration-permissions"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testutils.PreCheck(t, nil) },
Providers: testutils.GetProviders(),
CheckDestroy: testutils.CheckProjectDestroyed,
Steps: []resource.TestStep{
{
Config: config,
Check: resource.ComposeTestCheckFunc(
testutils.CheckProjectExists(projectName),
resource.TestCheckResourceAttrSet(tfNodeRoot, "project_id"),
resource.TestCheckResourceAttrSet(tfNodeRoot, "principal"),
resource.TestCheckNoResourceAttr(tfNodeRoot, "path"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.%", "3"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.CREATE_CHILDREN", "deny"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.GENERIC_READ", "notset"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.DELETE", "deny"),
resource.TestCheckResourceAttrSet(tfNodeIteration, "project_id"),
resource.TestCheckResourceAttrSet(tfNodeIteration, "principal"),
resource.TestCheckResourceAttr(tfNodeIteration, "path", "Iteration 1"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.%", "3"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.CREATE_CHILDREN", "allow"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.GENERIC_READ", "notset"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.DELETE", "allow"),
),
},
},
})
}
func TestAccIterationPermissions_UpdatePermissions(t *testing.T)
|
{
projectName := testutils.GenerateResourceName()
config1 := hclIterationPermissions(projectName, map[string]map[string]string{
"root": {
"CREATE_CHILDREN": "Deny",
"GENERIC_READ": "NotSet",
"DELETE": "Deny",
},
"iteration": {
"CREATE_CHILDREN": "Allow",
"GENERIC_READ": "NotSet",
"DELETE": "Allow",
},
})
config2 := hclIterationPermissions(projectName, map[string]map[string]string{
"root": {
"CREATE_CHILDREN": "Allow",
"GENERIC_READ": "NotSet",
"DELETE": "Deny",
},
"iteration": {
"CREATE_CHILDREN": "Deny",
"GENERIC_READ": "Allow",
"DELETE": "NotSet",
},
})
tfNodeRoot := "azuredevops_iteration_permissions.root-permissions"
tfNodeIteration := "azuredevops_iteration_permissions.iteration-permissions"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testutils.PreCheck(t, nil) },
Providers: testutils.GetProviders(),
CheckDestroy: testutils.CheckProjectDestroyed,
Steps: []resource.TestStep{
{
Config: config1,
Check: resource.ComposeTestCheckFunc(
testutils.CheckProjectExists(projectName),
resource.TestCheckResourceAttrSet(tfNodeRoot, "project_id"),
resource.TestCheckResourceAttrSet(tfNodeRoot, "principal"),
resource.TestCheckNoResourceAttr(tfNodeRoot, "path"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.%", "3"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.CREATE_CHILDREN", "deny"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.GENERIC_READ", "notset"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.DELETE", "deny"),
resource.TestCheckResourceAttrSet(tfNodeIteration, "project_id"),
resource.TestCheckResourceAttrSet(tfNodeIteration, "principal"),
resource.TestCheckResourceAttr(tfNodeIteration, "path", "Iteration 1"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.%", "3"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.CREATE_CHILDREN", "allow"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.GENERIC_READ", "notset"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.DELETE", "allow"),
),
},
{
Config: config2,
Check: resource.ComposeTestCheckFunc(
testutils.CheckProjectExists(projectName),
resource.TestCheckResourceAttrSet(tfNodeRoot, "project_id"),
resource.TestCheckResourceAttrSet(tfNodeRoot, "principal"),
resource.TestCheckNoResourceAttr(tfNodeRoot, "path"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.%", "3"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.CREATE_CHILDREN", "allow"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.GENERIC_READ", "notset"),
resource.TestCheckResourceAttr(tfNodeRoot, "permissions.DELETE", "deny"),
resource.TestCheckResourceAttrSet(tfNodeIteration, "project_id"),
resource.TestCheckResourceAttrSet(tfNodeIteration, "principal"),
resource.TestCheckResourceAttr(tfNodeIteration, "path", "Iteration 1"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.%", "3"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.CREATE_CHILDREN", "deny"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.GENERIC_READ", "allow"),
resource.TestCheckResourceAttr(tfNodeIteration, "permissions.DELETE", "notset"),
),
},
},
})
}
|
|
han_conv.py
|
from typing import Union, Dict, Optional, List
import torch
from torch import Tensor, nn
import torch.nn.functional as F
from torch_geometric.typing import NodeType, EdgeType, Metadata, Adj
from torch_geometric.nn.dense import Linear
from torch_geometric.utils import softmax
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import glorot, reset
def group(xs: List[Tensor], q: nn.Parameter,
k_lin: nn.Module) -> Optional[Tensor]:
if len(xs) == 0:
return None
else:
num_edge_types = len(xs)
out = torch.stack(xs)
attn_score = (q * torch.tanh(k_lin(out)).mean(1)).sum(-1)
attn = F.softmax(attn_score, dim=0)
out = torch.sum(attn.view(num_edge_types, 1, -1) * out, dim=0)
return out
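# Brief sketch of group() above: with M edge-type outputs each of shape
# [num_nodes, out_channels], torch.stack gives [M, num_nodes, out_channels];
# tanh(k_lin(out)).mean(1) averages over nodes to [M, out_channels], the dot
# product with q yields one semantic score per edge type, softmax normalizes
# across the M edge types, and the weighted sum collapses back to
# [num_nodes, out_channels].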
class HANConv(MessagePassing):
r"""
The Heterogenous Graph Attention Operator from the
`"Heterogenous Graph Attention Network"
<https://arxiv.org/pdf/1903.07293.pdf>`_ paper.
.. note::
For an example of using HANConv, see `examples/hetero/han_imdb.py
<https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
hetero/han_imdb.py>`_.
Args:
in_channels (int or Dict[str, int]): Size of each input sample of every
node type, or :obj:`-1` to derive the size from the first input(s)
to the forward method.
out_channels (int): Size of each output sample.
metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata
of the heterogeneous graph, *i.e.* its node and edge types given
by a list of strings and a list of string triplets, respectively.
See :meth:`torch_geometric.data.HeteroData.metadata` for more
information.
heads (int, optional): Number of multi-head-attentions.
(default: :obj:`1`)
negative_slope (float, optional): LeakyReLU angle of the negative
slope. (default: :obj:`0.2`)
dropout (float, optional): Dropout probability of the normalized
attention coefficients which exposes each node to a stochastically
sampled neighborhood during training. (default: :obj:`0`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(
self,
in_channels: Union[int, Dict[str, int]],
out_channels: int,
metadata: Metadata,
heads: int = 1,
negative_slope: float = 0.2,
dropout: float = 0.0,
**kwargs,
):
|
def reset_parameters(self):
reset(self.proj)
glorot(self.lin_src)
glorot(self.lin_dst)
self.k_lin.reset_parameters()
glorot(self.q)
def forward(
self, x_dict: Dict[NodeType, Tensor],
edge_index_dict: Dict[EdgeType,
Adj]) -> Dict[NodeType, Optional[Tensor]]:
r"""
Args:
x_dict (Dict[str, Tensor]): A dictionary holding input node
features for each individual node type.
edge_index_dict (Dict[str, Union[Tensor, SparseTensor]]): A
dictionary holding graph connectivity information for each
individual edge type, either as a :obj:`torch.LongTensor` of
shape :obj:`[2, num_edges]` or a
:obj:`torch_sparse.SparseTensor`.
:rtype: :obj:`Dict[str, Optional[Tensor]]` - The output node embeddings
for each node type.
In case a node type does not receive any message, its output will
be set to :obj:`None`.
"""
H, D = self.heads, self.out_channels // self.heads
x_node_dict, out_dict = {}, {}
# Iterate over node types:
for node_type, x_node in x_dict.items():
x_node_dict[node_type] = self.proj[node_type](x_node).view(
-1, H, D)
out_dict[node_type] = []
# Iterate over edge types:
for edge_type, edge_index in edge_index_dict.items():
src_type, _, dst_type = edge_type
edge_type = '__'.join(edge_type)
lin_src = self.lin_src[edge_type]
lin_dst = self.lin_dst[edge_type]
x_dst = x_node_dict[dst_type]
alpha_src = (x_node_dict[src_type] * lin_src).sum(dim=-1)
alpha_dst = (x_dst * lin_dst).sum(dim=-1)
alpha = (alpha_src, alpha_dst)
# propagate_type: (x_dst: Tensor, alpha: PairTensor)
out = self.propagate(edge_index, x_dst=x_dst, alpha=alpha,
size=None)
out = F.relu(out)
out_dict[dst_type].append(out)
# Iterate over node types:
for node_type, outs in out_dict.items():
out = group(outs, self.q, self.k_lin)
if out is None:
out_dict[node_type] = None
continue
out_dict[node_type] = out
return out_dict
def message(self, x_dst_i: Tensor, alpha_i: Tensor, alpha_j: Tensor,
index: Tensor, ptr: Optional[Tensor],
size_i: Optional[int]) -> Tensor:
alpha = alpha_j + alpha_i
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, index, ptr, size_i)
alpha = F.dropout(alpha, p=self.dropout, training=self.training)
out = x_dst_i * alpha.view(-1, self.heads, 1)
return out.view(-1, self.out_channels)
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.out_channels}, '
f'heads={self.heads})')
|
super().__init__(aggr='add', node_dim=0, **kwargs)
if not isinstance(in_channels, dict):
in_channels = {node_type: in_channels for node_type in metadata[0]}
self.heads = heads
self.in_channels = in_channels
self.out_channels = out_channels
self.negative_slope = negative_slope
self.metadata = metadata
self.dropout = dropout
self.k_lin = nn.Linear(out_channels, out_channels)
self.q = nn.Parameter(torch.Tensor(1, out_channels))
self.proj = nn.ModuleDict()
for node_type, in_channels in self.in_channels.items():
self.proj[node_type] = Linear(in_channels, out_channels)
self.lin_src = nn.ParameterDict()
self.lin_dst = nn.ParameterDict()
dim = out_channels // heads
for edge_type in metadata[1]:
edge_type = '__'.join(edge_type)
self.lin_src[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))
self.lin_dst[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim))
self.reset_parameters()
|
day_07_the_treachery_of_whales.go
|
package year2021
import (
"math"
"github.com/lanphiergm/adventofcodego/internal/utils"
)
// TheTreacheryofWhalesPart1 computes the minimum fuel consumption assuming a
// constant cost function
func TheTreacheryofWhalesPart1(filename string) interface{} {
return findMinFuelConsumption(filename, sumLinearFuelConsumption)
}
// TheTreacheryofWhalesPart2 computes the minimum fuel consumption assuming an
// increasing cost function
func TheTreacheryofWhalesPart2(filename string) interface{} {
return findMinFuelConsumption(filename, sumIncreasingFuelConsumption)
}
func findMinFuelConsumption(filename string, sumFuel sumFuelConsumptionFunc) int {
positions := utils.ReadCsv(filename)
min, max := utils.GetExtrema(positions)
minFuel := utils.MaxInt
for i := min; i <= max; i++ {
fuel := sumFuel(positions, i)
if fuel < minFuel
|
}
return minFuel
}
// sumFuelConsumptionFunc is the signature for functions that compute the total fuel needed to move every position to rallyPoint
type sumFuelConsumptionFunc func(positions []int, rallyPoint int) int
func sumLinearFuelConsumption(positions []int, rallyPoint int) int {
fuel := 0
for i := 0; i < len(positions); i++ {
fuel += int(math.Abs(float64(positions[i] - rallyPoint)))
}
return fuel
}
func sumIncreasingFuelConsumption(positions []int, rallyPoint int) int {
fuel := 0
for i := 0; i < len(positions); i++ {
fuel += utils.Summorial(int(math.Abs(float64(positions[i] - rallyPoint))))
}
return fuel
}
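// Hedged note (an assumption about the helper, not stated in this file):
// utils.Summorial is taken to return the nth triangular number n*(n+1)/2,
// so moving a distance of 4 costs 1+2+3+4 = 10 fuel under the increasing
// cost function.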
|
{
minFuel = fuel
}
|
xds_routes.go
|
package controlplane
import (
"fmt"
"net/url"
"sort"
envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoy_config_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
envoy_type_matcher_v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/wrapperspb"
"github.com/pomerium/pomerium/config"
"github.com/pomerium/pomerium/internal/httputil"
"github.com/pomerium/pomerium/internal/urlutil"
)
const (
httpCluster = "pomerium-control-plane-http"
)
func (srv *Server) buildGRPCRoutes() ([]*envoy_config_route_v3.Route, error) {
action := &envoy_config_route_v3.Route_Route{
Route: &envoy_config_route_v3.RouteAction{
ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
Cluster: "pomerium-control-plane-grpc",
},
},
}
return []*envoy_config_route_v3.Route{{
Name: "pomerium-grpc",
Match: &envoy_config_route_v3.RouteMatch{
PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{
Prefix: "/",
},
Grpc: &envoy_config_route_v3.RouteMatch_GrpcRouteMatchOptions{},
},
Action: action,
TypedPerFilterConfig: map[string]*any.Any{
"envoy.filters.http.ext_authz": disableExtAuthz,
},
}}, nil
}
func (srv *Server) buildPomeriumHTTPRoutes(options *config.Options, domain string) ([]*envoy_config_route_v3.Route, error) {
var routes []*envoy_config_route_v3.Route
// enable ext_authz
r, err := srv.buildControlPlanePathRoute("/.pomerium/jwt", true)
if err != nil {
return nil, err
}
routes = append(routes, r)
// disable ext_authz and passthrough to proxy handlers
r, err = srv.buildControlPlanePathRoute("/ping", false)
if err != nil {
return nil, err
}
routes = append(routes, r)
r, err = srv.buildControlPlanePathRoute("/healthz", false)
if err != nil {
return nil, err
}
routes = append(routes, r)
r, err = srv.buildControlPlanePathRoute("/.pomerium", false)
if err != nil {
return nil, err
}
routes = append(routes, r)
r, err = srv.buildControlPlanePrefixRoute("/.pomerium/", false)
if err != nil {
return nil, err
}
routes = append(routes, r)
r, err = srv.buildControlPlanePathRoute("/.well-known/pomerium", false)
if err != nil {
return nil, err
}
routes = append(routes, r)
r, err = srv.buildControlPlanePrefixRoute("/.well-known/pomerium/", false)
if err != nil {
return nil, err
}
routes = append(routes, r)
// per #837, only add robots.txt if there are no unauthenticated routes
if !hasPublicPolicyMatchingURL(options, url.URL{Scheme: "https", Host: domain, Path: "/robots.txt"}) {
r, err := srv.buildControlPlanePathRoute("/robots.txt", false)
if err != nil {
return nil, err
}
routes = append(routes, r)
}
// if we're handling authentication, add the oauth2 callback url
authenticateURL, err := options.GetAuthenticateURL()
if err != nil {
return nil, err
}
if config.IsAuthenticate(options.Services) && hostMatchesDomain(authenticateURL, domain) {
r, err := srv.buildControlPlanePathRoute(options.AuthenticateCallbackPath, false)
if err != nil {
return nil, err
}
routes = append(routes, r)
}
// if we're the proxy and this is the forward-auth url
forwardAuthURL, err := options.GetForwardAuthURL()
if err != nil {
return nil, err
}
if config.IsProxy(options.Services) && options.ForwardAuthURL != nil && hostMatchesDomain(forwardAuthURL, domain) {
// disable ext_authz and pass request to proxy handlers that enable authN flow
r, err := srv.buildControlPlanePathAndQueryRoute("/verify", []string{urlutil.QueryForwardAuthURI, urlutil.QuerySessionEncrypted, urlutil.QueryRedirectURI})
if err != nil {
return nil, err
}
routes = append(routes, r)
r, err = srv.buildControlPlanePathAndQueryRoute("/", []string{urlutil.QueryForwardAuthURI, urlutil.QuerySessionEncrypted, urlutil.QueryRedirectURI})
if err != nil {
return nil, err
}
routes = append(routes, r)
r, err = srv.buildControlPlanePathAndQueryRoute("/", []string{urlutil.QueryForwardAuthURI})
if err != nil {
return nil, err
}
routes = append(routes, r)
// otherwise, enforce ext_authz; pass all other requests through to an upstream
// handler that will simply respond with http status 200 / OK indicating that
// the fronting forward-auth proxy can continue.
r, err = srv.buildControlPlaneProtectedPrefixRoute("/")
if err != nil {
return nil, err
}
routes = append(routes, r)
}
return routes, nil
}
func (srv *Server) buildControlPlaneProtectedPrefixRoute(prefix string) (*envoy_config_route_v3.Route, error) {
return &envoy_config_route_v3.Route{
Name: "pomerium-protected-prefix-" + prefix,
Match: &envoy_config_route_v3.RouteMatch{
PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{Prefix: prefix},
},
Action: &envoy_config_route_v3.Route_Route{
Route: &envoy_config_route_v3.RouteAction{
ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
Cluster: httpCluster,
},
},
},
}, nil
}
func (srv *Server) buildControlPlanePathAndQueryRoute(path string, queryparams []string) (*envoy_config_route_v3.Route, error) {
var queryParameterMatchers []*envoy_config_route_v3.QueryParameterMatcher
for _, q := range queryparams {
queryParameterMatchers = append(queryParameterMatchers,
&envoy_config_route_v3.QueryParameterMatcher{
Name: q,
QueryParameterMatchSpecifier: &envoy_config_route_v3.QueryParameterMatcher_PresentMatch{PresentMatch: true},
})
}
return &envoy_config_route_v3.Route{
Name: "pomerium-path-and-query" + path,
Match: &envoy_config_route_v3.RouteMatch{
PathSpecifier: &envoy_config_route_v3.RouteMatch_Path{Path: path},
QueryParameters: queryParameterMatchers,
},
Action: &envoy_config_route_v3.Route_Route{
Route: &envoy_config_route_v3.RouteAction{
ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
Cluster: httpCluster,
},
},
},
TypedPerFilterConfig: map[string]*any.Any{
"envoy.filters.http.ext_authz": disableExtAuthz,
},
}, nil
}
func (srv *Server) buildControlPlanePathRoute(path string, protected bool) (*envoy_config_route_v3.Route, error) {
r := &envoy_config_route_v3.Route{
Name: "pomerium-path-" + path,
Match: &envoy_config_route_v3.RouteMatch{
PathSpecifier: &envoy_config_route_v3.RouteMatch_Path{Path: path},
},
Action: &envoy_config_route_v3.Route_Route{
Route: &envoy_config_route_v3.RouteAction{
ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
Cluster: httpCluster,
},
},
},
}
if !protected {
r.TypedPerFilterConfig = map[string]*any.Any{
"envoy.filters.http.ext_authz": disableExtAuthz,
}
}
return r, nil
}
func (srv *Server) buildControlPlanePrefixRoute(prefix string, protected bool) (*envoy_config_route_v3.Route, error) {
r := &envoy_config_route_v3.Route{
Name: "pomerium-prefix-" + prefix,
Match: &envoy_config_route_v3.RouteMatch{
PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{Prefix: prefix},
},
Action: &envoy_config_route_v3.Route_Route{
Route: &envoy_config_route_v3.RouteAction{
ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
Cluster: httpCluster,
},
},
},
}
if !protected {
r.TypedPerFilterConfig = map[string]*any.Any{
"envoy.filters.http.ext_authz": disableExtAuthz,
}
}
return r, nil
}
var getPolicyName = func(policy *config.Policy) string {
if policy.EnvoyOpts != nil && policy.EnvoyOpts.Name != "" {
return policy.EnvoyOpts.Name
}
id, _ := policy.RouteID()
return fmt.Sprintf("policy-%x", id)
}
func (srv *Server) buildPolicyRoutes(options *config.Options, domain string) ([]*envoy_config_route_v3.Route, error) {
var routes []*envoy_config_route_v3.Route
responseHeadersToAdd := toEnvoyHeaders(options.Headers)
for i, p := range options.GetAllPolicies() {
policy := p
if !hostMatchesDomain(policy.Source.URL, domain) {
continue
}
match := mkRouteMatch(&policy)
requestHeadersToAdd := toEnvoyHeaders(policy.SetRequestHeaders)
requestHeadersToRemove := getRequestHeadersToRemove(options, &policy)
envoyRoute := &envoy_config_route_v3.Route{
Name: fmt.Sprintf("policy-%d", i),
Match: match,
Metadata: &envoy_config_core_v3.Metadata{
FilterMetadata: map[string]*structpb.Struct{
"envoy.filters.http.lua": {
Fields: map[string]*structpb.Value{
"remove_pomerium_cookie": {
Kind: &structpb.Value_StringValue{
StringValue: options.CookieName,
},
},
"remove_pomerium_authorization": {
Kind: &structpb.Value_BoolValue{
BoolValue: true,
},
},
"remove_impersonate_headers": {
Kind: &structpb.Value_BoolValue{
BoolValue: policy.KubernetesServiceAccountTokenFile != "" || policy.KubernetesServiceAccountToken != "",
},
},
},
},
},
},
RequestHeadersToAdd: requestHeadersToAdd,
RequestHeadersToRemove: requestHeadersToRemove,
ResponseHeadersToAdd: responseHeadersToAdd,
}
if policy.Redirect != nil {
action, err := srv.buildPolicyRouteRedirectAction(policy.Redirect)
if err != nil {
return nil, err
}
envoyRoute.Action = &envoy_config_route_v3.Route_Redirect{Redirect: action}
} else {
action, err := srv.buildPolicyRouteRouteAction(options, &policy)
if err != nil {
return nil, err
}
envoyRoute.Action = &envoy_config_route_v3.Route_Route{Route: action}
}
routes = append(routes, envoyRoute)
}
return routes, nil
}
func (srv *Server) buildPolicyRouteRedirectAction(r *config.PolicyRedirect) (*envoy_config_route_v3.RedirectAction, error) {
action := &envoy_config_route_v3.RedirectAction{}
switch {
case r.HTTPSRedirect != nil:
action.SchemeRewriteSpecifier = &envoy_config_route_v3.RedirectAction_HttpsRedirect{
HttpsRedirect: *r.HTTPSRedirect,
}
case r.SchemeRedirect != nil:
action.SchemeRewriteSpecifier = &envoy_config_route_v3.RedirectAction_SchemeRedirect{
SchemeRedirect: *r.SchemeRedirect,
}
}
if r.HostRedirect != nil {
action.HostRedirect = *r.HostRedirect
}
if r.PortRedirect != nil {
action.PortRedirect = *r.PortRedirect
}
switch {
case r.PathRedirect != nil:
action.PathRewriteSpecifier = &envoy_config_route_v3.RedirectAction_PathRedirect{
PathRedirect: *r.PathRedirect,
}
case r.PrefixRewrite != nil:
action.PathRewriteSpecifier = &envoy_config_route_v3.RedirectAction_PrefixRewrite{
PrefixRewrite: *r.PrefixRewrite,
}
}
if r.ResponseCode != nil {
action.ResponseCode = envoy_config_route_v3.RedirectAction_RedirectResponseCode(*r.ResponseCode)
}
if r.StripQuery != nil {
action.StripQuery = *r.StripQuery
}
return action, nil
}
func (srv *Server) buildPolicyRouteRouteAction(options *config.Options, policy *config.Policy) (*envoy_config_route_v3.RouteAction, error) {
clusterName := getPolicyName(policy)
routeTimeout := getRouteTimeout(options, policy)
idleTimeout := getRouteIdleTimeout(policy)
prefixRewrite, regexRewrite := getRewriteOptions(policy)
upgradeConfigs := []*envoy_config_route_v3.RouteAction_UpgradeConfig{
{
UpgradeType: "websocket",
Enabled: &wrappers.BoolValue{Value: policy.AllowWebsockets},
},
{
UpgradeType: "spdy/3.1",
Enabled: &wrappers.BoolValue{Value: policy.AllowSPDY},
},
}
if urlutil.IsTCP(policy.Source.URL) {
upgradeConfigs = append(upgradeConfigs, &envoy_config_route_v3.RouteAction_UpgradeConfig{
UpgradeType: "CONNECT",
Enabled: &wrappers.BoolValue{Value: true},
ConnectConfig: &envoy_config_route_v3.RouteAction_UpgradeConfig_ConnectConfig{},
})
}
action := &envoy_config_route_v3.RouteAction{
ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
Cluster: clusterName,
},
UpgradeConfigs: upgradeConfigs,
HostRewriteSpecifier: &envoy_config_route_v3.RouteAction_AutoHostRewrite{
AutoHostRewrite: &wrappers.BoolValue{Value: !policy.PreserveHostHeader},
},
Timeout: routeTimeout,
IdleTimeout: idleTimeout,
PrefixRewrite: prefixRewrite,
RegexRewrite: regexRewrite,
}
setHostRewriteOptions(policy, action)
return action, nil
}
func mkEnvoyHeader(k, v string) *envoy_config_core_v3.HeaderValueOption {
return &envoy_config_core_v3.HeaderValueOption{
Header: &envoy_config_core_v3.HeaderValue{
Key: k,
Value: v,
},
Append: &wrappers.BoolValue{Value: false},
}
}
func toEnvoyHeaders(headers map[string]string) []*envoy_config_core_v3.HeaderValueOption {
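	// Sort the header names so the generated Envoy configuration is
	// deterministic; Go's map iteration order is randomized, and a stable
	// ordering avoids spurious differences between otherwise identical snapshots.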
var ks []string
for k := range headers {
ks = append(ks, k)
}
sort.Strings(ks)
envoyHeaders := make([]*envoy_config_core_v3.HeaderValueOption, 0, len(headers))
for _, k := range ks {
envoyHeaders = append(envoyHeaders, mkEnvoyHeader(k, headers[k]))
}
return envoyHeaders
}
func mkRouteMatch(policy *config.Policy) *envoy_config_route_v3.RouteMatch {
match := &envoy_config_route_v3.RouteMatch{}
switch {
case urlutil.IsTCP(policy.Source.URL):
match.PathSpecifier = &envoy_config_route_v3.RouteMatch_ConnectMatcher_{
ConnectMatcher: &envoy_config_route_v3.RouteMatch_ConnectMatcher{},
}
case policy.Regex != "":
match.PathSpecifier = &envoy_config_route_v3.RouteMatch_SafeRegex{
SafeRegex: &envoy_type_matcher_v3.RegexMatcher{
EngineType: &envoy_type_matcher_v3.RegexMatcher_GoogleRe2{
GoogleRe2: &envoy_type_matcher_v3.RegexMatcher_GoogleRE2{},
},
Regex: policy.Regex,
},
}
case policy.Path != "":
match.PathSpecifier = &envoy_config_route_v3.RouteMatch_Path{Path: policy.Path}
case policy.Prefix != "":
match.PathSpecifier = &envoy_config_route_v3.RouteMatch_Prefix{Prefix: policy.Prefix}
default:
match.PathSpecifier = &envoy_config_route_v3.RouteMatch_Prefix{Prefix: "/"}
}
return match
}
func getRequestHeadersToRemove(options *config.Options, policy *config.Policy) []string {
requestHeadersToRemove := policy.RemoveRequestHeaders
if !policy.PassIdentityHeaders {
requestHeadersToRemove = append(requestHeadersToRemove, httputil.HeaderPomeriumJWTAssertion)
for _, claim := range options.JWTClaimsHeaders {
requestHeadersToRemove = append(requestHeadersToRemove, httputil.PomeriumJWTHeaderName(claim))
}
}
return requestHeadersToRemove
}
func getRouteTimeout(options *config.Options, policy *config.Policy) *durationpb.Duration {
var routeTimeout *durationpb.Duration
if policy.UpstreamTimeout != 0 {
routeTimeout = ptypes.DurationProto(policy.UpstreamTimeout)
} else if policy.AllowWebsockets || urlutil.IsTCP(policy.Source.URL) {
routeTimeout = ptypes.DurationProto(0)
} else {
routeTimeout = ptypes.DurationProto(options.DefaultUpstreamTimeout)
}
return routeTimeout
}
func getRouteIdleTimeout(policy *config.Policy) *durationpb.Duration {
var idleTimeout *durationpb.Duration
if policy.AllowWebsockets || urlutil.IsTCP(policy.Source.URL) {
idleTimeout = ptypes.DurationProto(0)
}
return idleTimeout
}
func getRewriteOptions(policy *config.Policy) (prefixRewrite string, regexRewrite *envoy_type_matcher_v3.RegexMatchAndSubstitute) {
if policy.PrefixRewrite != "" {
prefixRewrite = policy.PrefixRewrite
} else if policy.RegexRewritePattern != "" {
regexRewrite = &envoy_type_matcher_v3.RegexMatchAndSubstitute{
Pattern: &envoy_type_matcher_v3.RegexMatcher{
EngineType: &envoy_type_matcher_v3.RegexMatcher_GoogleRe2{
GoogleRe2: &envoy_type_matcher_v3.RegexMatcher_GoogleRE2{},
},
Regex: policy.RegexRewritePattern,
},
Substitution: policy.RegexRewriteSubstitution,
}
} else if len(policy.To) > 0 && policy.To[0].URL.Path != "" {
prefixRewrite = policy.To[0].URL.Path
}
return prefixRewrite, regexRewrite
}
func setHostRewriteOptions(policy *config.Policy, action *envoy_config_route_v3.RouteAction) {
	switch {
	case policy.HostRewrite != "":
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_HostRewriteLiteral{
			HostRewriteLiteral: policy.HostRewrite,
		}
	case policy.HostRewriteHeader != "":
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_HostRewriteHeader{
			HostRewriteHeader: policy.HostRewriteHeader,
		}
	case policy.HostPathRegexRewritePattern != "":
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_HostRewritePathRegex{
			HostRewritePathRegex: &envoy_type_matcher_v3.RegexMatchAndSubstitute{
				Pattern: &envoy_type_matcher_v3.RegexMatcher{
					EngineType: &envoy_type_matcher_v3.RegexMatcher_GoogleRe2{
						GoogleRe2: &envoy_type_matcher_v3.RegexMatcher_GoogleRE2{},
					},
					Regex: policy.HostPathRegexRewritePattern,
				},
				Substitution: policy.HostPathRegexRewriteSubstitution,
			},
		}
	case policy.PreserveHostHeader:
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_AutoHostRewrite{
			AutoHostRewrite: wrapperspb.Bool(false),
		}
	default:
		action.HostRewriteSpecifier = &envoy_config_route_v3.RouteAction_AutoHostRewrite{
			AutoHostRewrite: wrapperspb.Bool(true),
		}
	}
}
func hasPublicPolicyMatchingURL(options *config.Options, requestURL url.URL) bool {
	for _, policy := range options.GetAllPolicies() {
		if policy.AllowPublicUnauthenticatedAccess && policy.Matches(requestURL) {
			return true
		}
	}
	return false
}
|
test_runner.py
|
import multiprocessing
import os
import random
import shutil
from functools import partial
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple, Type, Union, cast
from unittest import TestLoader, TestSuite, mock, runner
from unittest.result import TestResult
from django.conf import settings
from django.db import connections
from django.test import TestCase
from django.test import runner as django_runner
from django.test.runner import DiscoverRunner
from django.test.signals import template_rendered
from scripts.lib.zulip_tools import (
TEMPLATE_DATABASE_DIR,
get_dev_uuid_var_path,
get_or_create_dev_uuid_var_path,
)
from zerver.lib import test_helpers
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_helpers import append_instrumentation_data, write_instrumentation_reports
# We need to pick an ID for this test-backend invocation, and store it
# in this global so it can be used in init_worker; this is used to
# ensure the database IDs we select are unique for each `test-backend`
# run. This probably should use a locking mechanism rather than the
# below hack, which fails 1/10000000 of the time.
random_id_range_start = str(random.randint(1, 10000000))
def get_database_id(worker_id: Optional[int] = None) -> str:
if worker_id:
return f"{random_id_range_start}_{worker_id}"
return random_id_range_start
# The root directory for this run of the test suite.
TEST_RUN_DIR = get_or_create_dev_uuid_var_path(
os.path.join("test-backend", f"run_{get_database_id()}")
)
_worker_id = 0 # Used to identify the worker process.
class TextTestResult(runner.TextTestResult):
"""
    This class has unpythonic function names because the base class follows
    this style.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.failed_tests: List[str] = []
def addInfo(self, test: TestCase, msg: str) -> None:
self.stream.write(msg)
self.stream.flush()
def addInstrumentation(self, test: TestCase, data: Dict[str, Any]) -> None:
append_instrumentation_data(data)
def startTest(self, test: TestCase) -> None:
TestResult.startTest(self, test)
self.stream.writeln(f"Running {test.id()}") # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
self.stream.flush()
def addSuccess(self, *args: Any, **kwargs: Any) -> None:
TestResult.addSuccess(self, *args, **kwargs)
def addError(self, *args: Any, **kwargs: Any) -> None:
TestResult.addError(self, *args, **kwargs)
test_name = args[0].id()
self.failed_tests.append(test_name)
def addFailure(self, *args: Any, **kwargs: Any) -> None:
TestResult.addFailure(self, *args, **kwargs)
test_name = args[0].id()
self.failed_tests.append(test_name)
def addSkip(self, test: TestCase, reason: str) -> None:
TestResult.addSkip(self, test, reason)
self.stream.writeln( # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
f"** Skipping {test.id()}: {reason}"
)
self.stream.flush()
class RemoteTestResult(django_runner.RemoteTestResult):
"""
The class follows the unpythonic style of function names of the
base class.
"""
def addInfo(self, test: TestCase, msg: str) -> None:
self.events.append(("addInfo", self.test_index, msg))
def addInstrumentation(self, test: TestCase, data: Dict[str, Any]) -> None:
# Some elements of data['info'] cannot be serialized.
if "info" in data:
del data["info"]
self.events.append(("addInstrumentation", self.test_index, data))
def process_instrumented_calls(func: Callable[[Dict[str, Any]], None]) -> None:
for call in test_helpers.INSTRUMENTED_CALLS:
func(call)
SerializedSubsuite = Tuple[Type[TestSuite], List[str]]
SubsuiteArgs = Tuple[Type["RemoteTestRunner"], int, SerializedSubsuite, bool]
def run_subsuite(args: SubsuiteArgs) -> Tuple[int, Any]:
# Reset the accumulated INSTRUMENTED_CALLS before running this subsuite.
test_helpers.INSTRUMENTED_CALLS = []
# The first argument is the test runner class but we don't need it
# because we run our own version of the runner class.
_, subsuite_index, subsuite, failfast = args
runner = RemoteTestRunner(failfast=failfast)
result = runner.run(deserialize_suite(subsuite))
# Now we send instrumentation related events. This data will be
# appended to the data structure in the main thread. For Mypy,
# type of Partial is different from Callable. All the methods of
# TestResult are passed TestCase as the first argument but
# addInstrumentation does not need it.
process_instrumented_calls(partial(result.addInstrumentation, None))
return subsuite_index, result.events
# Monkey-patch django.test.runner to allow using multiprocessing
# inside tests without a “daemonic processes are not allowed to have
# children” error.
class NoDaemonContext(multiprocessing.context.ForkContext):
class Process(multiprocessing.context.ForkProcess):
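        # Report the process as never daemonic: the getter always returns
        # False and the setter discards whatever multiprocessing assigns, so
        # worker processes may spawn children of their own.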
daemon = cast(bool, property(lambda self: False, lambda self, value: None))
django_runner.multiprocessing = NoDaemonContext()
def destroy_test_databases(worker_id: Optional[int] = None) -> None:
for alias in connections:
connection = connections[alias]
def monkey_patched_destroy_test_db(test_database_name: str, verbosity: Any) -> None:
"""
We need to monkey-patch connection.creation._destroy_test_db to
use the IF EXISTS parameter - we don't have a guarantee that the
database we're cleaning up actually exists and since Django 3.1 the original implementation
throws an ugly `RuntimeError: generator didn't stop after throw()` exception and triggers
a confusing warnings.warn inside the postgresql backend implementation in _nodb_cursor()
if the database doesn't exist.
https://code.djangoproject.com/ticket/32376
"""
with connection.creation._nodb_cursor() as cursor:
quoted_name = connection.creation.connection.ops.quote_name(test_database_name)
query = f"DROP DATABASE IF EXISTS {quoted_name}"
cursor.execute(query)
with mock.patch.object(
connection.creation, "_destroy_test_db", monkey_patched_destroy_test_db
):
# In the parallel mode, the test databases are created
# through the N=self.parallel child processes, and in the
# parent process (which calls `destroy_test_databases`),
# `settings_dict` remains unchanged, with the original
# template database name (zulip_test_template). So to
# delete the database zulip_test_template_<number>, we
# need to pass `number` to `destroy_test_db`.
#
# When we run in serial mode (self.parallel=1), we don't
# fork and thus both creation and destruction occur in the
# same process, which means `settings_dict` has been
# updated to have `zulip_test_template_<number>` as its
# database name by the creation code. As a result, to
# delete that database, we need to not pass a number
# argument to destroy_test_db.
if worker_id is not None:
"""Modified from the Django original to"""
database_id = get_database_id(worker_id)
connection.creation.destroy_test_db(suffix=database_id)
else:
connection.creation.destroy_test_db()
def create_test_databases(worker_id: int) -> None:
database_id = get_database_id(worker_id)
for alias in connections:
connection = connections[alias]
connection.creation.clone_test_db(
suffix=database_id,
keepdb=True,
)
settings_dict = connection.creation.get_test_db_clone_settings(database_id)
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
def init_worker(counter: "multiprocessing.sharedctypes.Synchronized[int]") -> None:
"""
This function runs only under parallel mode. It initializes the
individual processes which are also called workers.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
"""
You can now use _worker_id.
"""
# Clear the cache
from zerver.lib.cache import get_cache_backend
cache = get_cache_backend(None)
cache.clear()
# Close all connections
connections.close_all()
destroy_test_databases(_worker_id)
create_test_databases(_worker_id)
initialize_worker_path(_worker_id)
# We manually update the upload directory path in the URL regex.
from zproject.dev_urls import avatars_url
assert settings.LOCAL_UPLOADS_DIR is not None
assert avatars_url.default_args is not None
new_root = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
avatars_url.default_args["document_root"] = new_root
class ParallelTestSuite(django_runner.ParallelTestSuite):
run_subsuite = run_subsuite
init_worker = init_worker
def __init__(self, suite: TestSuite, processes: int, failfast: bool) -> None:
super().__init__(suite, processes, failfast)
# We can't specify a consistent type for self.subsuites, since
# the whole idea here is to monkey-patch that so we can use
# most of django_runner.ParallelTestSuite with our own suite
# definitions.
assert not isinstance(self.subsuites, SubSuiteList)
self.subsuites: Union[SubSuiteList, List[TestSuite]] = SubSuiteList(self.subsuites)
def check_import_error(test_name: str) -> None:
try:
# Directly using __import__ is not recommended, but here it gives
# clearer traceback as compared to importlib.import_module.
__import__(test_name)
except ImportError as exc:
raise exc from exc # Disable exception chaining in Python 3.
def initialize_worker_path(worker_id: int) -> None:
# Allow each test worker process to write to a unique directory
# within `TEST_RUN_DIR`.
worker_path = os.path.join(TEST_RUN_DIR, f"worker_{_worker_id}")
os.makedirs(worker_path, exist_ok=True)
settings.TEST_WORKER_DIR = worker_path
# Every process should upload to a separate directory so that
# race conditions can be avoided.
settings.LOCAL_UPLOADS_DIR = get_or_create_dev_uuid_var_path(
os.path.join(
"test-backend",
os.path.basename(TEST_RUN_DIR),
os.path.basename(worker_path),
"test_uploads",
)
)
settings.SENDFILE_ROOT = os.path.join(settings.LOCAL_UPLOADS_DIR, "files")
class Runner(DiscoverRunner):
parallel_test_suite = ParallelTestSuite
def __init__(self, *args: Any, **kwargs: Any) -> None:
DiscoverRunner.__init__(self, *args, **kwargs)
# `templates_rendered` holds templates which were rendered
# in proper logical tests.
self.templates_rendered: Set[str] = set()
# `shallow_tested_templates` holds templates which were rendered
# in `zerver.tests.test_templates`.
self.shallow_tested_templates: Set[str] = set()
template_rendered.connect(self.on_template_rendered)
def get_resultclass(self) -> Optional[Type[TextTestResult]]:
return TextTestResult
def on_template_rendered(self, sender: Any, context: Dict[str, Any], **kwargs: Any) -> None:
if hasattr(sender, "template"):
template_name = sender.template.name
if template_name not in self.templates_rendered:
if context.get("shallow_tested") and template_name not in self.templates_rendered:
self.shallow_tested_templates.add(template_name)
else:
self.templates_rendered.add(template_name)
self.shallow_tested_templates.discard(template_name)
def get_shallow_tested_templates(self) -> Set[str]:
return self.shallow_tested_templates
def setup_test_environment(self, *args: Any, **kwargs: Any) -> Any:
settings.DATABASES["default"]["NAME"] = settings.BACKEND_DATABASE_TEMPLATE
# We create/destroy the test databases in run_tests to avoid
# duplicate work when running in parallel mode.
# Write the template database ids to a file that we can
# reference for cleaning them up if they leak.
filepath = os.path.join(get_dev_uuid_var_path(), TEMPLATE_DATABASE_DIR, get_database_id())
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as f:
if self.parallel > 1:
for index in range(self.parallel):
f.write(get_database_id(index + 1) + "\n")
else:
f.write(get_database_id() + "\n")
# Check if we are in serial mode to avoid unnecessarily making a directory.
# We add "worker_0" in the path for consistency with parallel mode.
if self.parallel == 1:
initialize_worker_path(0)
return super().setup_test_environment(*args, **kwargs)
def teardown_test_environment(self, *args: Any, **kwargs: Any) -> Any:
# The test environment setup clones the zulip_test_template
# database, creating databases with names:
# 'zulip_test_template_N_<worker_id>',
# where N is `random_id_range_start`, and `worker_id` is a
# value between <1, self.parallel>.
#
# We need to delete those databases to avoid leaking disk
# (Django is smart and calls this on SIGINT too).
if self.parallel > 1:
for index in range(self.parallel):
destroy_test_databases(index + 1)
else:
destroy_test_databases()
# Clean up our record of which databases this process created.
filepath = os.path.join(get_dev_uuid_var_path(), TEMPLATE_DATABASE_DIR, get_database_id())
os.remove(filepath)
# Clean up our test runs root directory.
try:
shutil.rmtree(TEST_RUN_DIR)
except OSError:
print("Unable to clean up the test run's directory.")
return super().teardown_test_environment(*args, **kwargs)
def test_imports(
self, test_labels: List[str], suite: Union[TestSuite, ParallelTestSuite]
) -> None:
prefix_old = "unittest.loader.ModuleImportFailure." # Python <= 3.4
prefix_new = "unittest.loader._FailedTest." # Python > 3.4
error_prefixes = [prefix_old, prefix_new]
for test_name in get_test_names(suite):
for prefix in error_prefixes:
if test_name.startswith(prefix):
test_name = test_name[len(prefix) :]
for label in test_labels:
# This code block is for Python 3.5 when test label is
# directly provided, for example:
# ./tools/test-backend zerver.tests.test_alert_words.py
#
# In this case, the test name is of this form:
# 'unittest.loader._FailedTest.test_alert_words'
#
# Whereas check_import_error requires test names of
# this form:
# 'unittest.loader._FailedTest.zerver.tests.test_alert_words'.
if test_name in label:
test_name = label
break
check_import_error(test_name)
def run_tests(
self,
test_labels: List[str],
extra_tests: Optional[List[TestCase]] = None,
full_suite: bool = False,
include_webhooks: bool = False,
**kwargs: Any,
) -> Tuple[bool, List[str]]:
self.setup_test_environment()
try:
suite = self.build_suite(test_labels, extra_tests)
except AttributeError:
# We are likely to get here only when running tests in serial
# mode on Python 3.4 or lower.
# test_labels are always normalized to include the correct prefix.
# If we run the command with ./tools/test-backend test_alert_words,
# test_labels will be equal to ['zerver.tests.test_alert_words'].
for test_label in test_labels:
check_import_error(test_label)
# I think we won't reach this line under normal circumstances, but
# for some unforeseen scenario in which the AttributeError was not
# caused by an import error, let's re-raise the exception for
# debugging purposes.
raise
self.test_imports(test_labels, suite)
if self.parallel == 1:
# We are running in serial mode so create the databases here.
# For parallel mode, the databases are created in init_worker.
# We don't want to create and destroy DB in setup_test_environment
# because it will be called for both serial and parallel modes.
# However, at this point we know in which mode we would be running
# since that decision has already been made in build_suite().
#
# We pass a _worker_id, which in this code path is always 0
destroy_test_databases(_worker_id)
create_test_databases(_worker_id)
# We have to do the next line to avoid flaky scenarios where we
# run a single test and getting an SA connection causes data from
# a Django connection to be rolled back mid-test.
with get_sqlalchemy_connection():
result = self.run_suite(suite)
self.teardown_test_environment()
failed = self.suite_result(suite, result)
if not failed:
write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks)
return failed, result.failed_tests
def get_test_names(suite: Union[TestSuite, ParallelTestSuite]) -> List[str]:
if isinstance(suite, ParallelTestSuite):
# suite is ParallelTestSuite. It will have a subsuites parameter of
# type SubSuiteList. Each element of a SubsuiteList is a tuple whose
# first element is the type of TestSuite and the second element is a
# list of test names in that test suite. See serialize_suite() for the
# implementation details.
assert isinstance(suite.subsuites, SubSuiteList)
return [name for subsuite in suite.subsuites for name in subsuite[1]]
else:
return [t.id() for t in get_tests_from_suite(suite)]
def get_tests_from_suite(suite: TestSuite) -> Iterator[TestCase]:
for test in suite:
if isinstance(test, TestSuite):
yield from get_tests_from_suite(test)
else:
yield test
def serialize_suite(suite: TestSuite) -> Tuple[Type[TestSuite], List[str]]:
return type(suite), get_test_names(suite)
def deserialize_suite(args: Tuple[Type[TestSuite], List[str]]) -> TestSuite:
suite_class, test_names = args
suite = suite_class()
tests = TestLoader().loadTestsFromNames(test_names)
for test in get_tests_from_suite(tests):
suite.addTest(test)
return suite
class RemoteTestRunner(django_runner.RemoteTestRunner):
resultclass = RemoteTestResult
class SubSuiteList(List[Tuple[Type[TestSuite], List[str]]]):
"""
This class allows us to avoid changing the main logic of
ParallelTestSuite and still make it serializable.
"""
def __init__(self, suites: List[TestSuite]) -> None:
serialized_suites = [serialize_suite(s) for s in suites]
super().__init__(serialized_suites)
def __getitem__(self, index: Any) -> Any:
suite = super().__getitem__(index)
return deserialize_suite(suite)
| |
q23.py
|
# @Author : wuwuwu
# @Time : 2020/4/15
# @File : q23.py
# @Description : Histogram equalization
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
def histogramEqualization(img, Zmax=255):
    """
    Histogram equalization.
    :param img: input image (uint8, H x W x C)
    :param Zmax: maximum output pixel value
    :return: equalized image as uint8
    """
    H, W, C = img.shape
    S = H * W * C
    # Work in float so remapped values are not truncated to uint8 mid-loop.
    dst = img.copy().astype(np.float64)
    sum_h = 0
    # Cover every intensity level (0-255 inclusive); sum_h accumulates the
    # histogram, so level i is mapped to Zmax * CDF(i).
    for i in range(256):
        index = np.where(img == i)
        sum_h += len(img[index])
        dst[index] = Zmax / S * sum_h
    return np.clip(dst, 0, 255).astype(np.uint8)
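# A minimal vectorized sketch (illustrative, not part of the original script):
# the loop above builds the cumulative histogram incrementally; np.cumsum can
# express the same transfer function Z' = Zmax * CDF(z) as a lookup table.
def histogramEqualizationVectorized(img, Zmax=255):
    hist = np.bincount(img.ravel(), minlength=256)  # per-intensity pixel counts
    cdf = np.cumsum(hist) / img.size                # cumulative distribution in [0, 1]
    lut = np.clip(Zmax * cdf, 0, 255).astype(np.uint8)  # intensity -> equalized intensity
    return lut[img]                                 # remap every pixel through the table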
if __name__ == '__main__':
img = cv.imread('lenna.jpg')
dst = histogramEqualization(img, Zmax=255)
plt.figure()
plt.hist(img.flatten(), bins=255, rwidth=0.8, range=(0, 255))
plt.title('input histogram')
plt.figure()
plt.hist(dst.flatten(), bins=255, rwidth=0.8, range=(0, 255))
plt.title('output histogram')
plt.show()
cv.imshow('input', img)
cv.imshow('output', dst)
cv.waitKey(0)
cv.destroyAllWindows()
|
index.js
|
export * from './client-config.js';
export * from './page-layout';
export * from './error-status-code.js';
|
boot.js
|
(function() {
  var sui = Bangle.setUI;
  Bangle.setUI = function(mode, cb) {
    if (mode!="clock") return sui(mode,cb);
    return sui("clockupdown", (dir) => {
      let settings = require("Storage").readJSON("shortcuts.json", 1)||{};
      if (dir == -1) {
        if (settings.BTN1) load(settings.BTN1);
      } else if (dir == 1) {
        if (settings.BTN3) load(settings.BTN3);
      }
    });
  };
})();
|
cache.py
|
import numpy as np
class Cache:
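    """A fixed-size FIFO window: once max_size elements have been added, the
    oldest element is dropped, so mean() is a running average over the most
    recent max_size samples."""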
def __init__(self, max_size=10):
self.cache = []
self.size = 0
self.max_size=max_size
def add(self, element):
self.cache.append(element)
self.size+=1
if self.size > self.max_size:
del self.cache[0]
self.size = self.max_size
def mean(self):
return np.mean(np.array(self.cache), axis=0)
def empty(self):
return self.size == 0
def get_size(self):
return self.size
def get_last(self):
return self.cache[self.size-1]
    def print_cache(self):
for e in self.cache:
print(e)
if __name__ == '__main__':
print('===Test Cache===')
cache = Cache(max_size=5)
cache.add([5,4])
print(cache.get_size())
    cache.print_cache()
cache.add([8,1])
cache.add([3,2])
cache.add([4,5])
cache.add([6,2])
print(cache.get_size())
    cache.print_cache()
cache.add([1,4])
print(cache.get_size())
    cache.print_cache()
print(cache.mean())
|
0056_auto_20180819_1420.py
|
# Generated by Django 2.1 on 2018-08-19 08:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0055_rescuecamp_facilities_available'),
]
operations = [
migrations.AlterField(
model_name='rescuecamp',
name='facilities_available',
field=models.TextField(blank=True, null=True, verbose_name='Facilities Available (light, kitchen, toilets etc.) - ലഭ്യമായ സൗകര്യങ്ങൾ'),
),
]
|
camera_plugins_node.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import ros_numpy
import cv2
class image_listenner:
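    """Subscribes to the raw Pylon camera stream, converts each frame to an
    OpenCV BGR image, and republishes it on /pylon_camera_node/image_raw/rgb
    with the original sequence number and timestamp preserved."""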
def __init__(self):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/pylon_camera_node/image_raw",Image,self.image_sub_callback)
self.image_numpy_pub = rospy.Publisher('/pylon_camera_node/image_raw/rgb', Image, queue_size=1)
def image_sub_callback(self, msg):
''' callback of image_sub '''
try:
self.img = self.bridge.imgmsg_to_cv2(msg, "bgr8")
new_msg = ros_numpy.msgify(Image, self.img, encoding='bgr8')
new_msg.header.seq = msg.header.seq
new_msg.header.stamp = msg.header.stamp
new_msg.header.frame_id = "image_rgb"
self.image_numpy_pub.publish(new_msg)
print(new_msg.header.seq)
except CvBridgeError as e:
print(e)
if __name__ == '__main__':
rospy.init_node('image_listenner', anonymous=True)
image_listenning = image_listenner()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()
|
common.py
|
"""
Copyright (c) 2017, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the
United States Government. Neither the United States Government nor the United
States Department of Energy, nor Battelle, nor any of their employees, nor any
jurisdiction or organization that has cooperated in the development of these
materials, makes any warranty, express or implied, or assumes any legal
liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or
represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or service by
trade name, trademark, manufacturer, or otherwise does not necessarily
constitute or imply its endorsement, recommendation, or favoring by the
United States Government or any agency thereof, or Battelle Memorial Institute.
The views and opinions of authors expressed herein do not necessarily state or
reflect those of the United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by
BATTELLE
for the
UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
"""
from datetime import timedelta as td
from volttron.platform.agent.math_utils import mean
DX = '/diagnostic message'
"""Common functions used across multiple algorithms."""
def create_table_key(table_name, timestamp):
return "&".join([table_name, timestamp.isoformat()])
def check_date(current_time, timestamp_array):
    """
    Check current timestamp with previous timestamp to verify that there are no large missing data gaps.
    :param current_time:
    :param timestamp_array:
    :return:
    """
    if not timestamp_array:
        return False
    if current_time.date() != timestamp_array[-1].date():
        if (timestamp_array[-1].date() + td(days=1) != current_time.date() or
                (timestamp_array[-1].hour != 23 and current_time.hour == 0)):
            return True
    return False
def check_run_status(timestamp_array, current_time, no_required_data, minimum_diagnostic_time=None,
run_schedule="hourly", minimum_point_array=None):
"""
The diagnostics run at a regular interval (some minimum elapsed amount of time) and have a
minimum data count requirement (each time series of data must contain some minimum number of points).
:param timestamp_array:
:param current_time:
:param no_required_data:
:param minimum_diagnostic_time:
:param run_schedule:
:param minimum_point_array:
:return:
"""
def minimum_data():
min_data_array = timestamp_array if minimum_point_array is None else minimum_point_array
if len(min_data_array) < no_required_data:
return None
return True
if minimum_diagnostic_time is not None and timestamp_array:
sampling_interval = td(minutes=
round(((timestamp_array[-1] - timestamp_array[0]) / len(timestamp_array)).total_seconds() / 60))
required_time = (timestamp_array[-1] - timestamp_array[0]) + sampling_interval
if required_time >= minimum_diagnostic_time:
return minimum_data()
return False
if run_schedule == "hourly":
if timestamp_array and timestamp_array[-1].hour != current_time.hour:
return minimum_data()
elif run_schedule == "daily":
if timestamp_array and timestamp_array[-1].date() != current_time.date():
return minimum_data()
return False
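# A small usage sketch (illustrative, not part of the original module): with the
# default hourly schedule, check_run_status returns True once the timestamps
# roll into a new hour with at least no_required_data points collected, and
# False while data for the current hour is still accumulating.
def _demo_check_run_status():
    from datetime import datetime
    ts = [datetime(2017, 1, 1, 10, m) for m in range(0, 60, 5)]  # 12 samples in hour 10
    assert check_run_status(ts, datetime(2017, 1, 1, 11, 0), no_required_data=10) is True
    assert check_run_status(ts, datetime(2017, 1, 1, 10, 55), no_required_data=10) is False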
def setpoint_control_check(set_point_array, point_array, setpoint_deviation_threshold, dx_name, dx_offset, dx_result):
"""
    Verify that the point is tracking with the set point - identify potential control or sensor problems.
    :param set_point_array:
    :param point_array:
    :param setpoint_deviation_threshold:
:param dx_name:
:param dx_offset:
:param dx_result:
:return:
"""
avg_set_point = None
diagnostic_msg = {}
for key, threshold in list(setpoint_deviation_threshold.items()):
if set_point_array:
avg_set_point = sum(set_point_array)/len(set_point_array)
zipper = (set_point_array, point_array)
set_point_tracking = [abs(x - y) for x, y in zip(*zipper)]
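            # Express the tracking error as the mean absolute deviation between
            # the point and its set point, as a percentage of the average set point.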
set_point_tracking = mean(set_point_tracking)/avg_set_point*100.
if set_point_tracking > threshold:
# color_code = 'red'
msg = '{} - {}: point deviating significantly from set point.'.format(key, dx_name)
result = 1.1 + dx_offset
else:
# color_code = 'green'
msg = " {} - No problem detected for {} set".format(key, dx_name)
result = 0.0 + dx_offset
else:
# color_code = 'grey'
msg = "{} - {} set point data is not available.".format(key, dx_name)
result = 2.2 + dx_offset
dx_result.log(msg)
diagnostic_msg.update({key: result})
dx_table = {dx_name + DX: diagnostic_msg}
return avg_set_point, dx_table, dx_result
def pre_conditions(message, dx_list, analysis, cur_time, dx_result):
"""
Check for persistence of failure to meet pre-conditions for diagnostics.
:param message:
:param dx_list:
:param analysis:
:param cur_time:
:param dx_result:
:return:
"""
dx_msg = {'low': message, 'normal': message, 'high': message}
for diagnostic in dx_list:
dx_table = {diagnostic + DX: dx_msg}
table_key = create_table_key(analysis, cur_time)
dx_result.insert_table_row(table_key, dx_table)
return dx_result
|
closest_point.rs
|
use crate::{
algorithms::Length,
primitives::{Arc, Line},
};
use euclid::{approxeq::ApproxEq, Point2D, Scale, Vector2D};
use std::iter::FromIterator;
/// Find the location on an object which is closest to a target point.
///
/// # Usage
///
/// When trying to find the closest point to a [`Line`] you have the simple
/// cases, like when the point is directly on or above the line.
///
/// ```rust
/// # use arcs_core::{primitives::Line, algorithms::{ClosestPoint, Closest}};
/// # type Point = euclid::default::Point2D<f64>;
/// let start = Point::new(-10.0, 0.0);
/// let line = Line::new(start, Point::new(10.0, 0.0));
///
/// // a point on the line is closest to itself
/// assert_eq!(line.closest_point(start), Closest::One(start));
///
/// // somewhere directly below the line
/// let random_point = Point::new(8.0, -5.0);
/// assert_eq!(
/// line.closest_point(random_point),
/// Closest::One(Point::new(8.0, 0.0)),
/// );
/// ```
///
/// You can also have situations where there are multiple locations on an object
/// which are closest to the part. For example, somewhere halfway between the
/// start and end of an [`Arc`].
///
/// ```rust
/// # use arcs_core::{primitives::Arc, algorithms::{ClosestPoint, Closest}, Angle};
/// # type Point = euclid::default::Point2D<f64>;
/// let arc = Arc::from_centre_radius(
/// Point::new(0.0, 0.0),
/// 10.0,
/// Angle::zero(),
/// Angle::frac_pi_2() * 3.0,
/// );
///
/// let start = arc.start();
/// let end = arc.end();
/// let midpoint = start.lerp(end, 0.5);
///
/// assert_eq!(
/// arc.closest_point(midpoint),
/// Closest::Many(vec![start, end]),
/// );
/// ```
///
/// And by definition, there are infinitely many points on an arc which are
/// close to the centre.
///
/// ```rust
/// # use arcs_core::{primitives::Arc, algorithms::{ClosestPoint, Closest}, Angle};
/// # type Point = euclid::default::Point2D<f64>;
/// let arc = Arc::from_centre_radius(
/// Point::new(0.0, 0.0),
/// 10.0,
/// Angle::zero(),
/// Angle::pi(),
/// );
///
/// assert_eq!(arc.closest_point(arc.centre()), Closest::Infinite);
/// ```
pub trait ClosestPoint<Space> {
/// Calculate the closest point to `target`.
fn closest_point(&self, target: Point2D<f64, Space>) -> Closest<Space>;
}
impl<'c, Space, C: ClosestPoint<Space> + ?Sized> ClosestPoint<Space> for &'c C {
fn closest_point(&self, target: Point2D<f64, Space>) -> Closest<Space> {
(*self).closest_point(target)
}
}
impl<Space> ClosestPoint<Space> for Point2D<f64, Space> {
fn closest_point(&self, _target: Point2D<f64, Space>) -> Closest<Space> {
Closest::One(*self)
}
}
impl<Space> ClosestPoint<Space> for Line<Space> {
fn closest_point(&self, target: Point2D<f64, Space>) -> Closest<Space> {
if self.length().approx_eq(&0.0) {
return Closest::One(self.start);
}
let start = self.start;
let displacement = self.displacement();
// equation of the line: start + t * displacement, where 0 <= t <= 1
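        // Projecting (target - start) onto the displacement gives the closest
        // parameter t = dot(target - start, d) / |d|^2; the branches below
        // clamp t to [0, 1] so the answer stays on the segment.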
let t = Vector2D::dot(target - start, displacement)
/ (self.length() * self.length());
Closest::One(if t <= 0.0 {
self.start
} else if t >= 1.0 {
self.end
} else {
start + Scale::new(t).transform_vector(displacement)
})
}
}
impl<Space> ClosestPoint<Space> for Arc<Space> {
fn closest_point(&self, target: Point2D<f64, Space>) -> Closest<Space> {
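        // Strategy: project the target radially onto the circle carrying this
        // arc. If the projected point's angle falls within the arc's sweep it
        // is the unique closest point; otherwise the nearer endpoint wins, or
        // both endpoints when the target is equidistant from them.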
let radial = target - self.centre();
if radial.length().approx_eq(&0.0) {
return Closest::Infinite;
}
let angle_of_closest_point = radial.angle_from_x_axis();
let ideal_closest_point =
self.centre() + radial.normalize() * self.radius();
if self.contains_angle(angle_of_closest_point) {
return Closest::One(ideal_closest_point);
}
let to_start = (self.start() - ideal_closest_point).length();
let to_end = (self.end() - ideal_closest_point).length();
if to_start.approx_eq(&to_end) {
Closest::Many(vec![self.start(), self.end()])
} else if to_start < to_end {
Closest::One(self.start())
} else {
Closest::One(self.end())
}
}
}
/// An enum containing the different possible solutions for
/// [`ClosestPoint::closest_point()`].
#[derive(Debug, Clone, PartialEq)]
pub enum Closest<Space> {
    /// There are infinitely many solutions.
Infinite,
/// There is a single closest [`Point2D`].
One(Point2D<f64, Space>),
/// There are multiple closest [`Point2D`]s.
Many(Vec<Point2D<f64, Space>>),
}
impl<Space> Closest<Space> {
/// Are there infinitely many closest points?
pub fn is_infinite(&self) -> bool {
match self {
Closest::Infinite => true,
_ => false,
}
}
/// Get a slice of all the closest [`Point2D`]s.
///
/// # Note
///
/// This will be empty if there are infinitely many closest points.
pub fn points(&self) -> &[Point2D<f64, Space>] {
match self {
Closest::Infinite => &[],
Closest::One(item) => std::slice::from_ref(item),
Closest::Many(items) => &items,
}
}
}
impl<Space> FromIterator<Point2D<f64, Space>> for Closest<Space> {
fn from_iter<I: IntoIterator<Item = Point2D<f64, Space>>>(
iter: I,
) -> Closest<Space> {
let items = Vec::from_iter(iter);
match items.len() {
0 => Closest::Infinite,
1 => Closest::One(items[0]),
_ => Closest::Many(items),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Angle;
type Point = euclid::default::Point2D<f64>;
#[test]
fn on_the_line() {
let start = Point::new(1.0, 2.0);
let end = Point::new(3.0, 10.0);
let line = Line::new(start, end);
let midpoint = start + line.displacement() / 2.0;
let got = line.closest_point(midpoint);
assert_eq!(got, Closest::One(midpoint));
}
#[test]
fn closest_point_to_zero_length_line() {
let start = Point::new(1.0, 2.0);
let line = Line::new(start, start);
assert_eq!(line.length(), 0.0);
let target = Point::new(10.0, 0.0);
let got = line.closest_point(target);
assert_eq!(got, Closest::One(start));
}
#[test]
fn away_from_the_line() {
let start = Point::new(0.0, 0.0);
let end = Point::new(10.0, 0.0);
let line = Line::new(start, end);
let got = line.closest_point(Point::new(5.0, 5.0));
assert_eq!(got, Closest::One(Point::new(5.0, 0.0)));
}
#[test]
fn past_the_end_of_the_line() {
let start = Point::new(0.0, 0.0);
let end = Point::new(10.0, 0.0);
let line = Line::new(start, end);
let got = line.closest_point(Point::new(15.0, 5.0));
assert_eq!(got, Closest::One(end));
}
#[test]
fn before_the_start_of_the_line() {
let start = Point::new(0.0, 0.0);
let end = Point::new(10.0, 0.0);
let line = Line::new(start, end);
let got = line.closest_point(Point::new(-5.0, 5.0));
assert_eq!(got, Closest::One(start));
}
#[test]
fn centre_of_an_arc() {
let centre = Point::zero();
let arc =
Arc::from_centre_radius(centre, 10.0, Angle::zero(), Angle::pi());
let got = arc.closest_point(centre);
assert_eq!(got, Closest::Infinite);
}
#[test]
fn arc_start_point() {
let centre = Point::zero();
let arc =
Arc::from_centre_radius(centre, 10.0, Angle::zero(), Angle::pi());
let got = arc.closest_point(arc.start());
assert_eq!(got, Closest::One(arc.start()));
}
#[test]
    fn arc_end_point() {
        let centre = Point::zero();
        let arc =
            Arc::from_centre_radius(centre, 10.0, Angle::zero(), Angle::pi());
        let got = arc.closest_point(arc.end());
        assert_eq!(got, Closest::One(arc.end()));
    }
#[test]
fn midway_between_arc_end_points() {
let centre = Point::zero();
let arc =
Arc::from_centre_radius(centre, 10.0, Angle::zero(), Angle::pi());
let got = arc.closest_point(Point::new(0.0, -10.0));
assert_eq!(got, Closest::Many(vec![arc.start(), arc.end()]));
}
}
|
GoldenTrillionCash_es.ts
|
<TS language="es" version="2.1">
<context>
<name>AddressBookPage</name>
<message>
<source>Right-click to edit address or label</source>
<translation>Click derecho para editar la dirección o etiqueta</translation>
</message>
<message>
<source>Create a new address</source>
<translation>Crear nueva dirección</translation>
</message>
<message>
<source>New</source>
<translation>Nuevo</translation>
</message>
<message>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copiar la dirección seleccionada al portapapeles</translation>
</message>
<message>
<source>Copy</source>
<translation>Copiar</translation>
</message>
<message>
<source>Delete the currently selected address from the list</source>
<translation>Borrar la dirección seleccionada de la lista</translation>
</message>
<message>
<source>Delete</source>
<translation>Borrar</translation>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation>Exportar los datos de la pestaña actual a un archivo</translation>
</message>
<message>
<source>Export</source>
<translation>Exportar</translation>
</message>
<message>
<source>Close</source>
<translation>Cerrar</translation>
</message>
<message>
<source>Choose the address to send coins to</source>
<translation>Escoja la dirección a la cual desee enviar las monedas</translation>
</message>
<message>
<source>Choose the address to receive coins with</source>
<translation>Escoja la dirección en la cual recibirá las monedas</translation>
</message>
<message>
<source>Choose</source>
<translation>Escoger</translation>
</message>
<message>
<source>Sending addresses</source>
<translation>Direcciones de envío</translation>
</message>
<message>
<source>Receiving addresses</source>
<translation>Direcciones de recepción</translation>
</message>
<message>
<source>These are your GTR addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>Estas son sus direcciones GTR para realizar pagos. Verifique siempre la cantidad y la dirección de recepción antes de enviar monedas.</translation>
</message>
<message>
<source>These are your GTR addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source>
<translation>Estas son sus direcciones GTR para recibir pagos. Es recomendable usar una nueva dirección de recepción para cada transacción.</translation>
</message>
<message>
<source>Copy Address</source>
<translation>Copiar dirección</translation>
</message>
<message>
<source>Copy Label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<source>Edit</source>
<translation>Editar</translation>
</message>
<message>
<source>Export Address List</source>
<translation>Exportar lista de direcciones</translation>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>Archivo separado por comas (*.csv)</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation>Exportación fallida</translation>
</message>
<message>
<source>There was an error trying to save the address list to %1. Please try again.</source>
<translation>Ha habido un error intentando guardar la lista de direcciones %1. Por favor inténtelo de nuevo.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<source>Passphrase Dialog</source>
<translation>Diálogo de contraseña</translation>
</message>
<message>
<source>Enter passphrase</source>
<translation>Introduzca la contraseña</translation>
</message>
<message>
<source>New passphrase</source>
<translation>Nueva contraseña</translation>
</message>
<message>
<source>Repeat new passphrase</source>
<translation>Repetir nueva contraseña</translation>
</message>
<message>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation>Sirve para desactivar el envío de dinero cuando la cuenta del SO está comprometida. No provee de una seguridad real</translation>
</message>
<message>
<source>For anonymization, automint, and staking only</source>
<translation>Para anonimización, automint y staking solamente</translation>
</message>
<message>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Escriba la nueva contraseña para el monedero. <br/>Por favor utilice una contraseña de <b>10 o más caracteres aleatorios</b>, u <b> ocho o más palabras</b></translation>
</message>
<message>
<source>Encrypt wallet</source>
<translation>Cifrar monedero</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Esta operación requiere su contraseña para desbloquear el monedero</translation>
</message>
<message>
<source>Unlock wallet</source>
<translation>Desbloquear monedero</translation>
</message>
<message>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Esta operación requiere su contraseña para descifrar el monedero</translation>
</message>
<message>
<source>Decrypt wallet</source>
<translation>Descifrar monedero</translation>
</message>
<message>
<source>Change passphrase</source>
<translation>Cambiar contraseña</translation>
</message>
<message>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Ingrese la antigua y la nueva contraseña para el monedero</translation>
</message>
<message>
<source>Confirm wallet encryption</source>
<translation>Confirme el cifrado del monedero</translation>
</message>
<message>
<source>GTR will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your GTRs from being stolen by malware infecting your computer.</source>
<translation>GTR se cerrará para finalizar el proceso de cifrado. Recuerde que cifrar su monedero no garantiza que sus GTRs no sean robados mediante malware de su ordenador.</translation>
</message>
<message>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>¿Está seguro de que desea cifrar su monedero?</translation>
</message>
<message>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR GTR</b>!</source>
<translation>Atención: Si cifra su monedero y pierde su contraseña, perderá <b> TODOS SUS GTR</b>!</translation>
</message>
<message>
<source>Wallet encrypted</source>
<translation>Monedero cifrado</translation>
</message>
<message>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>IMPORTANTE: Cualquier copia de seguridad anterior que haya realizado de su monedero debe ser reemplazada por la nueva copia de seguridad cifrada. Por razones de seguridad, las copias de seguridad anteriores del monedero sin cifrar pasarán a ser obsoletas tan pronto empiece a utilizar el nuevo monedero cifrado.</translation>
</message>
<message>
<source>Wallet encryption failed</source>
<translation>El cifrado del monedero ha fallado</translation>
</message>
<message>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>El cifrado del monedero ha fallado debido a un error interno. Su monedero no ha sido cifrado.</translation>
</message>
<message>
<source>The supplied passphrases do not match.</source>
<translation>Las contraseñas introducidas no coinciden.</translation>
</message>
<message>
<source>Wallet unlock failed</source>
<translation>Desbloqueo del monedero fallido</translation>
</message>
<message>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La contraseña introducida para el descifrado del monedero es incorrecta.</translation>
</message>
<message>
<source>Wallet decryption failed</source>
<translation>Descifrado del monedero fallido</translation>
</message>
<message>
<source>Wallet passphrase was successfully changed.</source>
<translation>Se cambió la contraseña con éxito.</translation>
</message>
<message>
<source>Warning: The Caps Lock key is on!</source>
<translation>Aviso: La tecla Mayúsculas está encendida!</translation>
</message>
</context>
<context>
<name>BanTableModel</name>
<message>
<source>IP/Netmask</source>
<translation>IP/Máscara de Red</translation>
</message>
<message>
<source>Banned Until</source>
<translation>Suspendido hasta</translation>
</message>
</context>
<context>
<name>Bip38ToolDialog</name>
<message>
<source>BIP 38 Tool</source>
<translation>Herramienta BIP 38</translation>
</message>
<message>
<source>BIP 38 Encrypt</source>
<translation>Cifrado BIP 38</translation>
</message>
<message>
<source>Address:</source>
<translation>Dirección:</translation>
</message>
<message>
<source>Enter a GTR Address that you would like to encrypt using BIP 38. Enter a passphrase in the middle box. Press encrypt to compute the encrypted private key.</source>
<translation>Introduzca la dirección GTR que querría cifrar usando BIP 38. Introduzca una contraseña en el campo central. Haga clic en cifrar para generar la clave privada cifrada.</translation>
</message>
<message>
<source>The GTR address to encrypt</source>
<translation>La dirección GTR a cifrar</translation>
</message>
<message>
<source>Choose previously used address</source>
<translation>Escoja una dirección usada previamente</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt + A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde el portapapeles</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt + P</translation>
</message>
<message>
<source>Passphrase: </source>
<translation>Contraseña:</translation>
</message>
<message>
<source>Encrypted Key:</source>
<translation>Clave cifrada:</translation>
</message>
<message>
<source>Copy the current signature to the system clipboard</source>
<translation>Copiar la firma actual al portapapeles del sistema</translation>
</message>
<message>
<source>Encrypt the private key for this GTR address</source>
<translation>Cifrar la llave privada para esta dirección GTR</translation>
</message>
<message>
<source>Reset all fields</source>
<translation>Limpiar todos los campos</translation>
</message>
<message>
<source>The encrypted private key</source>
<translation>La llave privada cifrada</translation>
</message>
<message>
<source>Decrypt the entered key using the passphrase</source>
<translation>Descifrar la llave ingresada usando la frase de contraseña</translation>
</message>
<message>
<source>Encrypt Key</source>
<translation>Cifrar Clave</translation>
</message>
<message>
<source>Clear All</source>
<translation>Limpiar Todo</translation>
</message>
<message>
<source>BIP 38 Decrypt</source>
<translation>Descifrado BIP 38</translation>
</message>
<message>
<source>Enter the BIP 38 encrypted private key. Enter the passphrase in the middle box. Click Decrypt Key to compute the private key. After the key is decrypted, clicking 'Import Address' will add this private key to the wallet.</source>
<translation>Introduzca la clave cifrada privada BIP 38. Introduzca la contraseña en el campo central. Haga clic en descifrar clave para generar la clave privada. Después de que la clave sea descifrada, pulsando en 'Importar dirección' añadirá esta clave privada al monedero.</translation>
</message>
<message>
<source>Decrypt Key</source>
<translation>Descifrar Clave</translation>
</message>
<message>
<source>Decrypted Key:</source>
<translation>Clave descifrada:</translation>
</message>
<message>
<source>Import Address</source>
<translation>Importar dirección</translation>
</message>
<message>
<source>Click "Decrypt Key" to compute key</source>
<translation>Haga clic en "Descifrar clave" para generar la clave</translation>
</message>
<message>
<source>The entered passphrase is invalid. </source>
<translation>La contraseña introducida es inválida. </translation>
</message>
<message>
<source>Allowed: 0-9,a-z,A-Z,</source>
<translation>Permitido: 0-9,a-z,A-Z,</translation>
</message>
<message>
<source>The entered address is invalid.</source>
<translation>La dirección introducida es inválida.</translation>
</message>
<message>
<source>Please check the address and try again.</source>
<translation>Por favor compruebe la dirección e inténtelo de nuevo.</translation>
</message>
<message>
<source>The entered address does not refer to a key.</source>
<translation>La dirección introducida no se refiere a ninguna clave.</translation>
</message>
<message>
<source>Wallet unlock was cancelled.</source>
<translation>El desbloqueo del monedero fue cancelado.</translation>
</message>
<message>
<source>Private key for the entered address is not available.</source>
<translation>La clave privada para la dirección introducida no está disponible.</translation>
</message>
<message>
<source>Failed to decrypt.</source>
<translation>Falló el descifrado.</translation>
</message>
<message>
<source>Please check the key and passphrase and try again.</source>
<translation>Por favor compruebe la clave y la contraseña e inténtelo de nuevo.</translation>
</message>
<message>
<source>Data Not Valid.</source>
<translation>Datos no válidos.</translation>
</message>
<message>
<source>Please try again.</source>
<translation>Por favor inténtelo de nuevo.</translation>
</message>
<message>
<source>Please wait while key is imported</source>
<translation>Por favor espere mientras la clave es importada</translation>
</message>
<message>
<source>Key Already Held By Wallet</source>
<translation>Llave Ya Presente En El Monedero</translation>
</message>
<message>
<source>Error Adding Key To Wallet</source>
<translation>Error Añadiendo la Clave al Monedero</translation>
</message>
<message>
<source>Successfully Added Private Key To Wallet</source>
<translation>Clave privada añadida al monedero con éxito</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<source>Wallet</source>
<translation>Monedero</translation>
</message>
<message>
<source>Node</source>
<translation>Nodo</translation>
</message>
<message>
<source>Overview</source>
<translation>Visión general</translation>
</message>
<message>
<source>Show general overview of wallet</source>
<translation>Mostrar visión general del monedero</translation>
</message>
<message>
<source>Send</source>
<translation>Enviar</translation>
</message>
<message>
<source>Receive</source>
<translation>Recibir</translation>
</message>
<message>
<source>Transactions</source>
<translation>Transacciones</translation>
</message>
<message>
<source>Browse transaction history</source>
<translation>Navegar por el historial de transacciones</translation>
</message>
<message>
<source>Privacy Actions for zGTR</source>
<translation>Acciones de privacidad para zGTR</translation>
</message>
<message>
<source>Exit</source>
<translation>Salir</translation>
</message>
<message>
<source>Quit application</source>
<translation>Cerrar aplicación</translation>
</message>
<message>
<source>About Qt</source>
<translation>Sobre Qt</translation>
</message>
<message>
<source>Show information about Qt</source>
<translation>Mostrar información sobre Qt</translation>
</message>
<message>
<source>Options...</source>
<translation>Opciones...</translation>
</message>
<message>
<source>Show / Hide</source>
<translation>Mostrar / Esconder</translation>
</message>
<message>
<source>Show or hide the main Window</source>
<translation>Mostrar o esconder la ventana principal</translation>
</message>
<message>
<source>Encrypt Wallet...</source>
<translation>Encriptar monedero...</translation>
</message>
<message>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Encriptar las claves privadas que pertenecen a su monedero</translation>
</message>
<message>
<source>Backup Wallet...</source>
<translation>Copia de seguridad del monedero...</translation>
</message>
<message>
<source>Backup wallet to another location</source>
<translation>Hacer copia de seguridad del monedero en otra ubicación</translation>
</message>
<message>
<source>Change Passphrase...</source>
<translation>Cambiar contraseña...</translation>
</message>
<message>
<source>Change the passphrase used for wallet encryption</source>
<translation>Cambiar contraseña usada para la encriptación del monedero</translation>
</message>
<message>
<source>Unlock Wallet...</source>
<translation>Desbloquear monedero...</translation>
</message>
<message>
<source>Unlock wallet</source>
<translation>Desbloquear monedero</translation>
</message>
<message>
<source>Lock Wallet</source>
<translation>Bloquear monedero</translation>
</message>
<message>
<source>Sign message...</source>
<translation>Firmar mensaje...</translation>
</message>
<message>
<source>Verify message...</source>
<translation>Verificar mensaje...</translation>
</message>
<message>
<source>Information</source>
<translation>Información</translation>
</message>
<message>
<source>Show diagnostic information</source>
<translation>Mostrar información de diagnóstico</translation>
</message>
<message>
<source>Debug console</source>
<translation>Consola de depuración</translation>
</message>
<message>
<source>Open debugging console</source>
<translation>Abrir consola de depuración</translation>
</message>
<message>
<source>Network Monitor</source>
<translation>Monitor de red</translation>
</message>
<message>
<source>Show network monitor</source>
<translation>Mostrar monitor de red</translation>
</message>
<message>
<source>Peers list</source>
<translation>Lista de Peers</translation>
</message>
<message>
<source>Show peers info</source>
<translation>Mostrar información de peers</translation>
</message>
<message>
<source>Wallet Repair</source>
<translation>Reparación del monedero</translation>
</message>
<message>
<source>Show wallet repair options</source>
<translation>Mostrar opciones de reparación del monedero</translation>
</message>
<message>
<source>Open configuration file</source>
<translation>Abrir archivo de configuración</translation>
</message>
<message>
<source>Show Automatic Backups</source>
<translation>Mostrar copias de seguridad automatizadas</translation>
</message>
<message>
<source>Show automatically created wallet backups</source>
<translation>Mostrar las copias de seguridad creadas automáticamente</translation>
</message>
<message>
<source>Sending addresses...</source>
<translation>Direcciones de envío...</translation>
</message>
<message>
<source>Show the list of used sending addresses and labels</source>
<translation>Mostrar la lista y etiquetas de direcciones de envío usadas</translation>
</message>
<message>
<source>Receiving addresses...</source>
<translation>Direcciones de recepción...</translation>
</message>
<message>
<source>Show the list of used receiving addresses and labels</source>
<translation>Mostrar la lista de direcciones de recepción y etiquetas usadas</translation>
</message>
<message>
<source>Multisignature creation...</source>
<translation>Creación multifirmas...</translation>
</message>
<message>
<source>Create a new multisignature address and add it to this wallet</source>
<translation>Crear una nueva dirección multifirma y agregarla a este monedero</translation>
</message>
<message>
<source>Multisignature spending...</source>
<translation>Gasto multifirmas...</translation>
</message>
<message>
<source>Spend from a multisignature address</source>
<translation>Gastar desde una dirección multifirmas</translation>
</message>
<message>
<source>Multisignature signing...</source>
<translation>Firma multifirmas...</translation>
</message>
<message>
<source>Sign with a multisignature address</source>
<translation>Firmar con una dirección multifirmas</translation>
</message>
<message>
<source>Open URI...</source>
<translation>Abrir URI...</translation>
</message>
<message>
<source>Command-line options</source>
<translation>Opciones de línea de comandos</translation>
</message>
<message numerus="yes">
<source>Processed %n blocks of transaction history.</source>
<translation><numerusform>Procesado %n bloque del histórico de transacciones.</numerusform><numerusform>Procesados %n bloques del histórico de transacciones.</numerusform></translation>
</message>
<message>
<source>Synchronizing additional data: %p%</source>
<translation>Sincronizando datos adicionales: %p%</translation>
</message>
<message>
<source>%1 behind. Scanning block %2</source>
<translation>%1 detrás. Escaneando bloque %2</translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b> for anonymization and staking only</source>
<translation>El monedero está <b>cifrado</b> y actualmente <b>desbloqueado</b> únicamente para anonimización y staking</translation>
</message>
<message>
<source>File</source>
<translation>Archivo</translation>
</message>
<message>
<source>Settings</source>
<translation>Ajustes</translation>
</message>
<message>
<source>Tools</source>
<translation>Herramientas</translation>
</message>
<message>
<source>Help</source>
<translation>Ayuda</translation>
</message>
<message>
<source>Tabs toolbar</source>
<translation>Barra de herramientas de pestañas</translation>
</message>
<message>
<source>GTR Core</source>
<translation>GTR Core</translation>
</message>
<message>
<source>Send coins to a GTR address</source>
<translation>Enviar monedas a una dirección GTR</translation>
</message>
<message>
<source>Request payments (generates QR codes and GTR: URIs)</source>
<translation>Solicitar pagos (genera códigos QR y URIs GTR:)</translation>
</message>
<message>
<source>Privacy</source>
<translation>Privacidad</translation>
</message>
<message>
<source>Masternodes</source>
<translation>Masternodes</translation>
</message>
<message>
<source>Browse masternodes</source>
<translation>Explorar masternodes</translation>
</message>
<message>
<source>About GTR Core</source>
<translation>Sobre GTR Core</translation>
</message>
<message>
<source>Show information about GTR Core</source>
<translation>Mostrar información sobre GTR Core</translation>
</message>
<message>
<source>Modify configuration options for GTR</source>
<translation>Modificar las opciones de configuración de GTR</translation>
</message>
<message>
<source>Sign messages with your GTR addresses to prove you own them</source>
<translation>Firmar mensajes con sus direcciones GTR para demostrar que le pertenecen</translation>
</message>
<message>
<source>Verify messages to ensure they were signed with specified GTR addresses</source>
<translation>Verificar mensajes para asegurar que están firmados con la dirección GTR especificada</translation>
</message>
<message>
<source>BIP38 tool</source>
<translation>Herramienta BIP38</translation>
</message>
<message>
<source>Encrypt and decrypt private keys using a passphrase</source>
<translation>Encriptar y desencriptar las llaves privadas usando una frase contraseña</translation>
</message>
<message>
<source>MultiSend</source>
<translation>MultiEnvío</translation>
</message>
<message>
<source>MultiSend Settings</source>
<translation>Configuración de MultiEnvío</translation>
</message>
<message>
<source>Open Wallet Configuration File</source>
<translation>Abrir Fichero de Configuración del Monedero</translation>
</message>
<message>
<source>Open Masternode Configuration File</source>
<translation>Abrir Fichero de Configuración de Masternodes</translation>
</message>
<message>
<source>Open Masternode configuration file</source>
<translation>Abrir fichero de Configuración de masternodes</translation>
</message>
<message>
<source>Open a GTR: URI or payment request</source>
<translation>Abrir un GTR: URI o solicitud de pago</translation>
</message>
<message>
<source>Blockchain explorer</source>
<translation>Explorador de Blockchain</translation>
</message>
<message>
<source>Block explorer window</source>
<translation>Ventana del explorador de bloques</translation>
</message>
<message>
<source>Show the GTR Core help message to get a list with possible GTR command-line options</source>
<translation>Mostrar la ayuda de GTR Core para obtener una lista de posibles opciones en línea de comandos</translation>
</message>
<message>
<source>GTR Core client</source>
<translation>Cliente GTR Core</translation>
</message>
<message numerus="yes">
<source>%n active connection(s) to GTR network</source>
<translation><numerusform>%n conexión activa a la red GTR</numerusform><numerusform>%n conexiones activas a la red GTR</numerusform></translation>
</message>
<message>
<source>Synchronizing with network...</source>
<translation>Sincronizando con la red...</translation>
</message>
<message>
<source>Importing blocks from disk...</source>
<translation>Importando bloques de disco...</translation>
</message>
<message>
<source>Reindexing blocks on disk...</source>
<translation>Reindexando bloques en el disco...</translation>
</message>
<message>
<source>No block source available...</source>
<translation>Fuente de bloques no disponible...</translation>
</message>
<message>
<source>Up to date</source>
<translation>Actualizado</translation>
</message>
<message numerus="yes">
<source>%n hour(s)</source>
<translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation>
</message>
<message numerus="yes">
<source>%n day(s)</source>
<translation><numerusform>%n día</numerusform><numerusform>%n días</numerusform></translation>
</message>
<message numerus="yes">
<source>%n week(s)</source>
<translation><numerusform>%n semana</numerusform><numerusform>%n semanas</numerusform></translation>
</message>
<message>
<source>%1 and %2</source>
<translation>%1 y %2</translation>
</message>
<message numerus="yes">
<source>%n year(s)</source>
<translation><numerusform>%n año</numerusform><numerusform>%n años</numerusform></translation>
</message>
<message>
<source>Catching up...</source>
<translation>Poniéndose al día...</translation>
</message>
<message>
<source>Last received block was generated %1 ago.</source>
<translation>El último bloque recibido se generó hace %1.</translation>
</message>
<message>
<source>Transactions after this will not yet be visible.</source>
<translation>Las transacciones posteriores todavía no son visibles.</translation>
</message>
<message>
<source>Error</source>
<translation>Error</translation>
</message>
<message>
<source>Warning</source>
<translation>Advertencia</translation>
</message>
<message>
<source>Information</source>
<translation>Información</translation>
</message>
<message>
<source>Sent transaction</source>
<translation>Transacción enviada</translation>
</message>
<message>
<source>Incoming transaction</source>
<translation>Transacción entrante</translation>
</message>
<message>
<source>Sent MultiSend transaction</source>
<translation>Transacción MultiEnvío emitida</translation>
</message>
<message>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Fecha: %1
Cantidad: %2
Tipo: %3
Dirección: %4
</translation>
</message>
<message>
<source>Staking is active
MultiSend: %1</source>
<translation>Staking está activado
MultiEnvío: %1</translation>
</message>
<message>
<source>Active</source>
<translation>Activo</translation>
</message>
<message>
<source>Not Active</source>
<translation>Inactivo</translation>
</message>
<message>
<source>Staking is not active
MultiSend: %1</source>
<translation>Staking no está activado
MultiEnvío: %1</translation>
</message>
<message>
<source>AutoMint is currently enabled and set to </source>
<translation>AutoMint está actualmente habilitado y configurado en </translation>
</message>
<message>
<source>AutoMint is disabled</source>
<translation>AutoMint está desactivado</translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>El monedero está <b>encriptado</b> y actualmente <b>desbloqueado</b></translation>
</message>
<message>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>El monedero está <b>encriptado</b> y actualmente <b>bloqueado</b></translation>
</message>
</context>
<context>
<name>BlockExplorer</name>
<message>
<source>Blockchain Explorer</source>
<translation>Explorador del blockchain</translation>
</message>
<message>
<source>Back</source>
<translation>Atrás</translation>
</message>
<message>
<source>Forward</source>
<translation>Adelante</translation>
</message>
<message>
<source>Address / Block / Transaction</source>
<translation>Dirección / Bloque / Transacción</translation>
</message>
<message>
<source>Search</source>
<translation>Buscar</translation>
</message>
<message>
<source>TextLabel</source>
<translation>EtiquetaDeTexto</translation>
</message>
<message>
<source>Not all transactions will be shown. To view all transactions you need to set txindex=1 in the configuration file (GTR.conf).</source>
<translation>No se muestran todas las transacciones. Para ver todas las transacciones introduzca la línea "txindex=1" en el archivo de configuración (GTR.conf).</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<source>Total: %1 (IPv4: %2 / IPv6: %3 / Tor: %4 / Unknown: %5)</source>
<translation>Total: %1 (IPv4: %2 / IPv6: %3 / Tor: %4 / Desconocido: %5)</translation>
</message>
<message>
<source>Network Alert</source>
<translation>Alerta de Red</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<source>Quantity:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Bytes:</source>
<translation>Octetos:</translation>
</message>
<message>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<source>Fee:</source>
<translation>Comisión:</translation>
</message>
<message>
<source>Coin Selection</source>
<translation>Selección de Moneda</translation>
</message>
<message>
<source>Dust:</source>
<translation>Calderilla:</translation>
</message>
<message>
<source>After Fee:</source>
<translation>Después de Comisión:</translation>
</message>
<message>
<source>Change:</source>
<translation>Cambio:</translation>
</message>
<message>
<source>(un)select all</source>
<translation>(de)seleccionar todos</translation>
</message>
<message>
<source>toggle lock state</source>
<translation>cambiar estado de bloqueo</translation>
</message>
<message>
<source>Tree mode</source>
<translation>Modo de Árbol</translation>
</message>
<message>
<source>List mode</source>
<translation>Modo de Lista</translation>
</message>
<message>
<source>(1 locked)</source>
<translation>(1 bloqueado)</translation>
</message>
<message>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<source>Received with label</source>
<translation>Recibido con etiqueta</translation>
</message>
<message>
<source>Received with address</source>
<translation>Recibido con dirección</translation>
</message>
<message>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<source>Confirmations</source>
<translation>Confirmaciones</translation>
</message>
<message>
<source>Confirmed</source>
<translation>Confirmado</translation>
</message>
<message>
<source>Priority</source>
<translation>Prioridad</translation>
</message>
<message>
<source>Copy address</source>
<translation>Copiar dirección</translation>
</message>
<message>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<source>Copy transaction ID</source>
<translation>Copiar código de transacción</translation>
</message>
<message>
<source>Lock unspent</source>
<translation>Bloquear no gastado</translation>
</message>
<message>
<source>Unlock unspent</source>
<translation>Desbloquear no gastado</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<source>Copy fee</source>
<translation>Copiar comisión</translation>
</message>
<message>
<source>Copy after fee</source>
<translation>Copiar después de comisión</translation>
</message>
<message>
<source>Copy bytes</source>
<translation>Copiar octetos</translation>
</message>
<message>
<source>Copy priority</source>
<translation>Copiar prioridad</translation>
</message>
<message>
<source>Copy dust</source>
<translation>Copiar calderilla</translation>
</message>
<message>
<source>Copy change</source>
<translation>Copiar cambio</translation>
</message>
<message>
<source>Please switch to "List mode" to use this function.</source>
<translation>Por favor cambie a "Modo de Lista" para utilizar esta función.</translation>
</message>
<message>
<source>highest</source>
<translation>la más alta</translation>
</message>
<message>
<source>higher</source>
<translation>más alta</translation>
</message>
<message>
<source>high</source>
<translation>alta</translation>
</message>
<message>
<source>medium-high</source>
<translation>medio-alta</translation>
</message>
<message>
<source>medium</source>
<translation>media</translation>
</message>
<message>
<source>low-medium</source>
<translation>baja-media</translation>
</message>
<message>
<source>low</source>
<translation>baja</translation>
</message>
<message>
<source>lower</source>
<translation>más baja</translation>
</message>
<message>
<source>lowest</source>
<translation>la más baja</translation>
</message>
<message>
<source>(%1 locked)</source>
<translation>(%1 bloqueado)</translation>
</message>
<message>
<source>none</source>
<translation>ninguno</translation>
</message>
<message>
<source>yes</source>
<translation>sí</translation>
</message>
<message>
<source>no</source>
<translation>no</translation>
</message>
<message>
<source>This label turns red, if the transaction size is greater than 1000 bytes.</source>
<translation>Esta etiqueta se vuelve roja si el tamaño de la transacción es mayor de 1000 bytes.</translation>
</message>
<message>
<source>This means a fee of at least %1 per kB is required.</source>
<translation>Esto significa que se requiere una comisión de al menos %1 por kB.</translation>
</message>
<message>
<source>Can vary +/- 1 byte per input.</source>
<translation>Puede variar +/- 1 byte por entrada.</translation>
</message>
<message>
<source>Transactions with higher priority are more likely to get included into a block.</source>
<translation>Las transacciones con alta prioridad tienen más probabilidades de ser incluidas en un bloque.</translation>
</message>
<message>
<source>This label turns red, if the priority is smaller than "medium".</source>
<translation>Esta etiqueta se vuelve roja si la prioridad es inferior a "media".</translation>
</message>
<message>
<source>This label turns red, if any recipient receives an amount smaller than %1.</source>
<translation>Esta etiqueta se vuelve roja si algún destinatario recibe una cantidad menor que %1.</translation>
</message>
<message>
<source>Can vary +/- %1 uGTR per input.</source>
<translation>Puede variar +/- %1 uGTR por entrada.</translation>
</message>
<message>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<source>change from %1 (%2)</source>
<translation>cambio desde %1 (%2)</translation>
</message>
<message>
<source>(change)</source>
<translation>(cambio)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<source>Edit Address</source>
<translation>Editar Dirección</translation>
</message>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>The label associated with this address list entry</source>
<translation>La etiqueta asociada con esta entrada de la libreta de direcciones</translation>
</message>
<message>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<source>The address associated with this address list entry. This can only be modified for sending addresses.</source>
<translation>La dirección asociada con esta entrada de la libreta de direcciones. Ésta sólo puede ser modificada en las direcciones de envío.</translation>
</message>
<message>
<source>New receiving address</source>
<translation>Nueva dirección de cobro</translation>
</message>
<message>
<source>New sending address</source>
<translation>Nueva dirección de envío</translation>
</message>
<message>
<source>Edit receiving address</source>
<translation>Editar dirección de cobro</translation>
</message>
<message>
<source>Edit sending address</source>
<translation>Editar dirección de envío</translation>
</message>
<message>
<source>The entered address "%1" is not a valid GTR address.</source>
<translation>La dirección introducida "%1" no es una dirección GTR válida.</translation>
</message>
<message>
<source>The entered address "%1" is already in the address book.</source>
<translation>La dirección introducida "%1" ya está en la libreta de direcciones.</translation>
</message>
<message>
<source>Could not unlock wallet.</source>
<translation>No se pudo desbloquear el monedero.</translation>
</message>
<message>
<source>New key generation failed.</source>
<translation>La generación de llave nueva falló.</translation>
</message>
</context>
<context>
<name>FreespaceChecker</name>
<message>
<source>A new data directory will be created.</source>
<translation>Se creará una nueva carpeta de datos.</translation>
</message>
<message>
<source>name</source>
<translation>nombre</translation>
</message>
<message>
<source>Directory already exists. Add %1 if you intend to create a new directory here.</source>
<translation>El directorio ya existe. Añada %1 si pretende crear un nuevo directorio aquí.</translation>
</message>
<message>
<source>Path already exists, and is not a directory.</source>
<translation>La ruta ya existe, y no es una carpeta.</translation>
</message>
<message>
<source>Cannot create data directory here.</source>
<translation>No se puede crear un directorio de datos aquí.</translation>
</message>
</context>
<context>
<name>HelpMessageDialog</name>
<message>
<source>version</source>
<translation>versión</translation>
</message>
<message>
<source>GTR Core</source>
<translation>GTR Core</translation>
</message>
<message>
<source>(%1-bit)</source>
<translation>(%1-bit)</translation>
</message>
<message>
<source>About GTR Core</source>
<translation>Acerca de GTR Core</translation>
</message>
<message>
<source>Command-line options</source>
<translation>Opciones de línea de comandos</translation>
</message>
<message>
<source>Usage:</source>
<translation>Uso:</translation>
</message>
<message>
<source>command-line options</source>
<translation>opciones de línea de comandos</translation>
</message>
<message>
<source>UI Options:</source>
<translation>Opciones de interfaz de usuario:</translation>
</message>
<message>
<source>Choose data directory on startup (default: %u)</source>
<translation>Elija la carpeta de datos al arrancar (por defecto: %u)</translation>
</message>
<message>
<source>Show splash screen on startup (default: %u)</source>
<translation>Mostrar pantalla de bienvenida al arrancar (por defecto: %u)</translation>
</message>
<message>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Seleccionar el idioma, por ejemplo "es_ES" (por defecto: idioma del sistema)</translation>
</message>
<message>
<source>Start minimized</source>
<translation>Arrancar minimizado</translation>
</message>
<message>
<source>Set SSL root certificates for payment request (default: -system-)</source>
<translation>Elegir certificado raíz SSL para solicitud de pago (por defecto: -sistema-)</translation>
</message>
</context>
<context>
<name>Intro</name>
<message>
<source>Welcome</source>
<translation>Bienvenido/a</translation>
</message>
<message>
<source>Welcome to GTR Core.</source>
<translation>Bienvenido/a a GTR Core.</translation>
</message>
<message>
<source>As this is the first time the program is launched, you can choose where GTR Core will store its data.</source>
<translation>Al ser la primera vez que se inicia el programa, usted puede elegir dónde guardará GTR Core sus datos.</translation>
</message>
<message>
<source>GTR Core will download and store a copy of the GTR block chain. At least %1GB of data will be stored in this directory, and it will grow over time. The wallet will also be stored in this directory.</source>
<translation>GTR Core descargará y guardará una copia de la cadena de bloques GTR. Por lo menos %1GB de datos serán guardados en esta carpeta, y crecerá con el tiempo. El monedero también se guardará en esta carpeta.</translation>
</message>
<message>
<source>Use the default data directory</source>
<translation>Usar la carpeta de datos por defecto</translation>
</message>
<message>
<source>Use a custom data directory:</source>
<translation>Usar una carpeta de datos personalizada:</translation>
</message>
<message>
<source>GTR Core</source>
<translation>GTR Core</translation>
</message>
<message>
<source>Error: Specified data directory "%1" cannot be created.</source>
<translation>Error: La carpeta de datos especificada "%1" no pudo ser creada.</translation>
</message>
<message>
<source>Error</source>
<translation>Error</translation>
</message>
<message>
<source>%1 GB of free space available</source>
<translation>%1 GB de espacio libre en disco</translation>
</message>
<message>
<source>(of %1 GB needed)</source>
<translation>(de %1 GB necesarios)</translation>
</message>
</context>
<context>
<name>MasternodeList</name>
<message>
<source>Form</source>
<translation>Formulario</translation>
</message>
<message>
<source>MASTERNODES</source>
<translation>NODOS MAESTROS</translation>
</message>
<message>
<source>Note: Status of your masternodes in local wallet can potentially be slightly incorrect.<br />Always wait for wallet to sync additional data and then double check from another node<br />if your node should be running but you still see "MISSING" in "Status" field.</source>
<translation>Nota: El estado de sus nodos maestros en el monedero local podría ser ligeramente incorrecto.<br />Espere siempre a que el monedero sincronice la información adicional y entonces compruébelo desde otro nodo<br />si su nodo debería estar funcionando pero aún ve el mensaje "FALTA" en el campo "Estado".</translation>
</message>
<message>
<source>Alias</source>
<translation>Apodo</translation>
</message>
<message>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<source>Protocol</source>
<translation>Protocolo</translation>
</message>
<message>
<source>Status</source>
<translation>Estado</translation>
</message>
<message>
<source>Active</source>
<translation>Activo</translation>
</message>
<message>
<source>Last Seen (UTC)</source>
<translation>Visto por última vez (UTC)</translation>
</message>
<message>
<source>Pubkey</source>
<translation>Llave pública</translation>
</message>
<message>
<source>Start alias</source>
<translation>Iniciar apodo</translation>
</message>
<message>
<source>Start all</source>
<translation>Iniciar todo</translation>
</message>
<message>
<source>Start MISSING</source>
<translation>Iniciar NO ENCONTRADOS</translation>
</message>
<message>
<source>Update status</source>
<translation>Actualizar estado</translation>
</message>
<message>
<source>Status will be updated automatically in (sec):</source>
<translation>El estado se actualizará automáticamente en (seg):</translation>
</message>
<message>
<source>0</source>
<translation>0</translation>
</message>
<message>
<source>Start alias</source>
<translation>Iniciar apodo</translation>
</message>
<message>
<source>Confirm masternode start</source>
<translation>Confirmar inicio del masternode</translation>
</message>
<message>
<source>Are you sure you want to start masternode %1?</source>
<translation>¿Está seguro de que quiere arrancar el masternode %1?</translation>
</message>
<message>
<source>Confirm all masternodes start</source>
<translation>Confirmar el arranque de todos los masternodes</translation>
</message>
<message>
<source>Are you sure you want to start ALL masternodes?</source>
<translation>¿Está seguro de que quiere arrancar TODOS los masternodes?</translation>
</message>
<message>
<source>Command is not available right now</source>
<translation>El comando no está disponible en este momento</translation>
</message>
<message>
<source>You can't use this command until masternode list is synced</source>
<translation>No se puede utilizar este comando hasta que la lista de nodos maestros esté sincronizada</translation>
</message>
<message>
<source>Confirm missing masternodes start</source>
<translation>Confirmar arranque de masternodes no encontrados</translation>
</message>
<message>
<source>Are you sure you want to start MISSING masternodes?</source>
<translation>¿Está seguro de que quiere arrancar los masternodes NO ENCONTRADOS?</translation>
</message>
</context>
<context>
<name>MultiSendDialog</name>
<message>
<source>MultiSend</source>
<translation>MultiEnvío</translation>
</message>
<message>
<source>Enter whole numbers 1 - 100</source>
<translation>Introduzca números enteros 1 - 100</translation>
</message>
<message>
<source>Enter % to Give (1-100)</source>
<translation>Introduzca % a Dar (1-100)</translation>
</message>
<message>
<source>Enter Address to Send to</source>
<translation>Introduzca Dirección a la cual Enviar</translation>
</message>
<message>
<source>MultiSend allows you to automatically send up to 100% of your stake or masternode reward to a list of other GTR addresses after it matures.
To Add: enter percentage to give and GTR address to add to the MultiSend vector.
To Delete: Enter address to delete and press delete.
MultiSend will not be activated unless you have clicked Activate</source>
<translation>MultiEnvío le permite enviar automáticamente hasta el 100% de su recompensa de participación o de nodo maestro a una lista de otras direcciones GTR después de su maduración.
Para Añadir: introduzca el porcentaje a enviar y la dirección GTR a añadir al vector MultiEnvío.
Para Eliminar: introduzca la dirección a eliminar y pulse eliminar.
MultiEnvío no se activará a menos que usted haga clic en Activar</translation>
</message>
<message>
<source>Add to MultiSend Vector</source>
<translation>Añadir al Vector MultiEnvío</translation>
</message>
<message>
<source>Add</source>
<translation>Añadir</translation>
</message>
<message>
<source>Deactivate MultiSend</source>
<translation>Desactivar MultiEnvío</translation>
</message>
<message>
<source>Deactivate</source>
<translation>Desactivar</translation>
</message>
<message>
<source>Choose an address from the address book</source>
<translation>Seleccione una dirección de la libreta de direcciones</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Percentage of stake to send</source>
<translation>Porcentaje de stake a enviar</translation>
</message>
<message>
<source>Percentage:</source>
<translation>Porcentaje:</translation>
</message>
<message>
<source>Address to send portion of stake to</source>
<translation>Dirección a enviar porción del stake</translation>
</message>
<message>
<source>Address:</source>
<translation>Dirección:</translation>
</message>
<message>
<source>Label:</source>
<translation>Etiqueta:</translation>
</message>
<message>
<source>Enter a label for this address to add it to your address book</source>
<translation>Introduzca una etiqueta para esta dirección para añadirla a su libreta de direcciones</translation>
</message>
<message>
<source>Delete Address From MultiSend Vector</source>
<translation>Borrar Dirección De Vector MultiEnvío</translation>
</message>
<message>
<source>Delete</source>
<translation>Borrar</translation>
</message>
<message>
<source>Activate MultiSend</source>
<translation>Activar MultiEnvío</translation>
</message>
<message>
<source>Activate</source>
<translation>Activar</translation>
</message>
<message>
<source>View MultiSend Vector</source>
<translation>Ver Vector MultiEnvío</translation>
</message>
<message>
<source>View MultiSend</source>
<translation>Ver MultiEnvío</translation>
</message>
<message>
<source>Send For Stakes</source>
<translation>Enviar Para Stakes</translation>
</message>
<message>
<source>Send For Masternode Rewards</source>
<translation>Enviar Para Recompensas De Masternodes</translation>
</message>
<message>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<source>The entered address:
</source>
<translation>La dirección introducida:
</translation>
</message>
<message>
<source> is invalid.
Please check the address and try again.</source>
<translation> es inválida.
Por favor compruebe la dirección e inténtelo de nuevo.</translation>
</message>
<message>
<source>The total amount of your MultiSend vector is over 100% of your stake reward
</source>
<translation>La cantidad total de su vector MultiEnvío es superior al 100% de su recompensa de stake
</translation>
</message>
<message>
<source>Please Enter 1 - 100 for percent.</source>
<translation>Por favor, introduzca un porcentaje de 1 a 100.</translation>
</message>
<message>
<source>Saved the MultiSend to memory, but failed saving properties to the database.
</source>
<translation>Se guardó MultiEnvío en la memoria, pero se produjo un error al guardar las propiedades en la base de datos.
</translation>
</message>
<message>
<source>MultiSend Vector
</source>
<translation>Vector MultiEnvío
</translation>
</message>
<message>
<source>Removed </source>
<translation>Borrado </translation>
</message>
<message>
<source>Could not locate address
</source>
<translation>No se pudo localizar la dirección
</translation>
</message>
</context>
<context>
<name>MultisigDialog</name>
<message>
<source>Multisignature Address Interactions</source>
<translation>Interacciones de direcciones multifirmas</translation>
</message>
<message>
<source>Create MultiSignature Address</source>
<translation>Crear una dirección multifirma</translation>
</message>
<message>
<source>How many people must sign to verify a transaction</source>
<translation>Cuántas personas deben firmar para verificar una transacción</translation>
</message>
<message>
<source>Enter the minimum number of signatures required to sign transactions</source>
<translation>Introduzca el número mínimo de firmas requeridas para firmar transacciones</translation>
</message>
<message>
<source>Address Label:</source>
<translation>Etiqueta de dirección:</translation>
</message>
<message>
<source>Add another address that could sign to verify a transaction from the multisig address.</source>
<translation>Agregue otra dirección que pueda firmar para verificar una transacción desde la dirección multifirmas.</translation>
</message>
<message>
<source>Add Address / Key</source>
<translation>Agregar Dirección / Llave</translation>
</message>
<message>
<source>Local addresses or public keys that can sign:</source>
<translation>Direcciones locales o llaves públicas que puedan firmar:</translation>
</message>
<message>
<source>Create a new multisig address</source>
<translation>Crear una nueva dirección multifirmas</translation>
</message>
<message>
<source>Create</source>
<translation>Crear</translation>
</message>
<message>
<source>Status:</source>
<translation>Estado:</translation>
</message>
<message>
<source>Use below to quickly import an address by its redeem. Don't forget to add a label before clicking import!
Keep in mind, the wallet will rescan the blockchain to find transactions containing the new address.
Please be patient after clicking import.</source>
<translation>Use lo siguiente para importar rápidamente una dirección mediante su script de canje (redeem). ¡No olvide agregar una etiqueta antes de hacer clic en importar!
Tenga en cuenta que el monedero volverá a explorar el blockchain para buscar transacciones que contengan la nueva dirección.
Por favor, tenga paciencia después de hacer clic en importar.</translation>
</message>
<message>
<source>Import Redeem</source>
<translation>Importar Canje</translation>
</message>
<message>
<source>Create MultiSignature Tx</source>
<translation>Crear transacción multifirmas</translation>
</message>
<message>
<source>Inputs:</source>
<translation>Entradas:</translation>
</message>
<message>
<source>Coin Control</source>
<translation>Control de moneda</translation>
</message>
<message>
<source>Quantity Selected:</source>
<translation>Cantidad Seleccionada:</translation>
</message>
<message>
<source>0</source>
<translation>0</translation>
</message>
<message>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Add an input to fund the outputs</source>
<translation>Agregue una entrada para financiar las salidas</translation>
</message>
<message>
<source>Add a Raw Input</source>
<translation>Agregar una entrada sin procesar</translation>
</message>
<message>
<source>Address / Amount:</source>
<translation>Dirección / Cantidad:</translation>
</message>
<message>
<source>Add destinations to send GTR to</source>
<translation>Agregue los destinos para enviar GTR</translation>
</message>
<message>
<source>Add Destination</source>
<translation>Agregar Destino</translation>
</message>
<message>
<source>Create a transaction object using the given inputs to the given outputs</source>
<translation>Crear un objeto de transacción usando las entradas dadas a las salidas dadas</translation>
</message>
<message>
<source>Create</source>
<translation>Crear</translation>
</message>
<message>
<source>Sign MultiSignature Tx</source>
<translation>Firmar Tx multifirmas</translation>
</message>
<message>
<source>Transaction Hex:</source>
<translation>Hexadecimal de transacción:</translation>
</message>
<message>
<source>Sign the transaction from this wallet or from provided private keys</source>
<translation>Firmar la transacción desde este monedero o desde las llaves privadas provistas</translation>
</message>
<message>
<source>Sign</source>
<translation>Firmar</translation>
</message>
<message>
<source><html><head/><body><p>DISABLED until transaction has been signed enough times.</p></body></html></source>
<translation><html><head/><body><p>DESACTIVADO hasta que la transacción se haya firmado suficientes veces.</p></body></html></translation>
</message>
<message>
<source>Commit</source>
<translation>Confirmar</translation>
</message>
<message>
<source>Add private keys to sign the transaction with</source>
<translation>Agregar llaves privadas para firmar la transacción</translation>
</message>
<message>
<source>Add Private Key</source>
<translation>Agregar llave privada</translation>
</message>
<message>
<source>Sign with only private keys (Not Recommened)</source>
<translation>Firmar solo con llaves privadas (No Recomendado)</translation>
</message>
<message>
<source>Invalid Tx Hash.</source>
<translation>Hash Tx inválido.</translation>
</message>
<message>
<source>Vout position must be positive.</source>
<translation>La posición de Vout debe ser positiva.</translation>
</message>
<message>
<source>Maximum possible addresses reached. (15)</source>
<translation>Máxima cantidad de direcciones posibles alcanzadas. (15)</translation>
</message>
<message>
<source>Vout Position: </source>
<translation>Posición Vout: </translation>
</message>
<message>
<source>Amount: </source>
<translation>Cantidad: </translation>
</message>
<message>
<source>Maximum (15)</source>
<translation>Máximo (15)</translation>
</message>
</context>
<context>
<name>ObfuscationConfig</name>
<message>
<source>Configure Obfuscation</source>
<translation>Configurar Ofuscación</translation>
</message>
<message>
<source>Basic Privacy</source>
<translation>Privacidad Básica</translation>
</message>
<message>
<source>High Privacy</source>
<translation>Alta Privacidad</translation>
</message>
<message>
<source>Maximum Privacy</source>
<translation>Máxima Privacidad</translation>
</message>
<message>
<source>Please select a privacy level.</source>
<translation>Por favor seleccione un nivel de privacidad.</translation>
</message>
<message>
<source>Use 2 separate masternodes to mix funds up to 10000 GTR</source>
<translation>Use 2 masternodes diferentes para mezclar los fondos hasta los 10000 GTR</translation>
</message>
<message>
<source>Use 8 separate masternodes to mix funds up to 10000 GTR</source>
<translation>Use 8 masternodes diferentes para mezclar fondos hasta los 10000 GTR</translation>
</message>
<message>
<source>Use 16 separate masternodes</source>
<translation>Use 16 masternodes diferentes</translation>
</message>
<message>
<source>This option is the quickest and will cost about ~0.025 GTR to anonymize 10000 GTR</source>
<translation>Esta opción es la más rápida y costará alrededor de ~0.025 GTR para anonimizar 10000 GTR</translation>
</message>
<message>
<source>This option is moderately fast and will cost about 0.05 GTR to anonymize 10000 GTR</source>
<translation>Esta opción es moderadamente rápida y costará cerca de 0.05 GTR para anonimizar 10000 GTR</translation>
</message>
<message>
<source>This is the slowest and most secure option. Using maximum anonymity will cost</source>
<translation>Esta es la opción más lenta y la más segura. Usar el máximo anonimato costará</translation>
</message>
<message>
<source>0.1 GTR per 10000 GTR you anonymize.</source>
<translation>0.1 GTR por cada 10000 GTR que anonimice.</translation>
</message>
<message>
<source>Obfuscation Configuration</source>
<translation>Configuración de Ofuscación</translation>
</message>
<message>
<source>Obfuscation was successfully set to basic (%1 and 2 rounds). You can change this at any time by opening GTR's configuration screen.</source>
<translation>La Ofuscación se configuró correctamente en modo básico (%1 y 2 rondas). Usted puede cambiar esto en cualquier momento abriendo la ventana de configuración de GTR.</translation>
</message>
<message>
<source>Obfuscation was successfully set to high (%1 and 8 rounds). You can change this at any time by opening GTR's configuration screen.</source>
<translation>La Ofuscación se activó correctamente en modo alto (%1 y 8 rondas). Usted puede cambiar esto en cualquier momento abriendo la ventana de configuración de GTR.</translation>
</message>
<message>
<source>Obfuscation was successfully set to maximum (%1 and 16 rounds). You can change this at any time by opening GTR's configuration screen.</source>
<translation>La Ofuscación se activó correctamente en su grado máximo (%1 y 16 rondas). Usted puede cambiar esto en cualquier momento en la ventana de configuración de GTR.</translation>
</message>
</context>
<context>
<name>OpenURIDialog</name>
<message>
<source>Open URI</source>
<translation>Abrir URI</translation>
</message>
<message>
<source>Open payment request from URI or file</source>
<translation>Abrir solicitud de pago de URI o archivo</translation>
</message>
<message>
<source>URI:</source>
<translation>URI:</translation>
</message>
<message>
<source>Select payment request file</source>
<translation>Seleccione el fichero que contiene la solicitud de pago</translation>
</message>
<message>
<source>Select payment request file to open</source>
<translation>Seleccione el fichero de solicitud de pago que desea abrir</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<source>Options</source>
<translation>Opciones</translation>
</message>
<message>
<source>Main</source>
<translation>Principal</translation>
</message>
<message>
<source>Size of database cache</source>
<translation>Tamaño del caché de base de datos</translation>
</message>
<message>
<source>MB</source>
<translation>MB</translation>
</message>
<message>
<source>Number of script verification threads</source>
<translation>Número de procesos de verificación de scripts</translation>
</message>
<message>
<source>(0 = auto, <0 = leave that many cores free)</source>
<translation>(0 = auto, <0 = dejar libres ese número de núcleos)</translation>
</message>
<message>
<source>Wallet</source>
<translation>Monedero</translation>
</message>
<message>
<source>If you disable the spending of unconfirmed change, the change from a transaction<br/>cannot be used until that transaction has at least one confirmation.<br/>This also affects how your balance is computed.</source>
<translation>Si desactiva el gasto de cambio no confirmado, el cambio de una transacción<br/>no podrá ser usado hasta que esa transacción tenga al menos una confirmación.<br/>Esto también afecta a cómo se calcula su balance.</translation>
</message>
<message>
<source>Automatically open the GTR client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Abrir automáticamente el puerto del cliente GTR en el enrutador. Esto sólo funciona si su enrutador soporta UPnP y esta función está activada.</translation>
</message>
<message>
<source>Accept connections from outside</source>
<translation>Aceptar conexiones del exterior</translation>
</message>
<message>
<source>Allow incoming connections</source>
<translation>Permitir conexiones entrantes</translation>
</message>
<message>
<source>Connect through SOCKS5 proxy (default proxy):</source>
<translation>Conectar a través de un proxy SOCKS5 (proxy por defecto):</translation>
</message>
<message>
<source>Expert</source>
<translation>Experto</translation>
</message>
<message>
<source>Automatically start GTR after logging in to the system.</source>
<translation>Arrancar GTR automáticamente después de identificarse en el sistema.</translation>
</message>
<message>
<source>Start GTR on system login</source>
<translation>Arrancar GTR al inicio del sistema</translation>
</message>
<message>
<source>Whether to show coin control features or not.</source>
<translation>Mostrar o no las características de control de monedas.</translation>
</message>
<message>
<source>Enable coin control features</source>
<translation>Activar funciones de control de monedas</translation>
</message>
<message>
<source>Show additional tab listing all your masternodes in first sub-tab<br/>and all masternodes on the network in second sub-tab.</source>
<translation>Mostrar pestaña adicional listando todos sus masternodes en la primera sub-pestaña<br/>y todos los masternodes de la red en la segunda sub-pestaña.</translation>
</message>
<message>
<source>Show Masternodes Tab</source>
<translation>Mostrar la pestaña de masternodes</translation>
</message>
<message>
<source>Spend unconfirmed change</source>
<translation>Gastar cambio no confirmado</translation>
</message>
<message>
<source>Network</source>
<translation>Red</translation>
</message>
<message>
<source>The user interface language can be set here. This setting will take effect after restarting GTR.</source>
<translation>El idioma de la interfaz de usuario puede seleccionarse aquí. Este ajuste tomará efecto después de reiniciar GTR.</translation>
</message>
<message>
<source>Language missing or translation incomplete? Help contributing translations here:
https://www.transifex.com/GTR-project/GTR-project-translations</source>
<translation>¿Falta su lenguaje o la traducción está incompleta? Contribuya con las traducciones aquí:
https://www.transifex.com/GTR-project/GTR-project-translations</translation>
</message>
<message>
<source>Map port using UPnP</source>
<translation>Mapear un puerto utilizando UPnP</translation>
</message>
<message>
<source>Enable automatic minting of GTR units to zGTR</source>
<translation>Activar el minting automático de unidades GTR a zGTR</translation>
</message>
<message>
<source>Enable zGTR Automint</source>
<translation>Activar zGTR Automint</translation>
</message>
<message>
<source>Percentage of incoming GTR which get automatically converted to zGTR via Zerocoin Protocol (min: 10%)</source>
<translation>Porcentaje de GTR entrantes que serán automáticamente convertidos a zGTR a través del Protocolo Zerocoin (mín: 10%)</translation>
</message>
<message>
<source>Percentage of autominted zGTR</source>
<translation>Porcentaje de zGTR autogenerados</translation>
</message>
<message>
<source>Wait with automatic conversion to Zerocoin until enough GTR for this denomination is available</source>
<translation>Esperar con la conversión automática a Zerocoin hasta que haya suficientes GTR disponibles para este tamaño de billete</translation>
</message>
<message>
<source>Preferred Automint zGTR Denomination</source>
<translation>Tamaño de billete zGTR preferido en la creación automática de dinero</translation>
</message>
<message>
<source>Stake split threshold:</source>
<translation>Umbral de división del stake:</translation>
</message>
<message>
<source>Connect to the GTR network through a SOCKS5 proxy.</source>
<translation>Conectar a la red GTR mediante un proxy SOCKS5.</translation>
</message>
<message>
<source>Proxy IP:</source>
<translation>IP del proxy:</translation>
</message>
<message>
<source>IP address of the proxy (e.g. IPv4: 127.0.0.1 / IPv6: ::1)</source>
<translation>Dirección IP del proxy (p.e. IPv4: 127.0.0.1 / IPv6: ::1)</translation>
</message>
<message>
<source>Port:</source>
<translation>Puerto:</translation>
</message>
<message>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Puerto del proxy (p.e. 9050)</translation>
</message>
<message>
<source>Window</source>
<translation>Ventana</translation>
</message>
<message>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Mostrar sólo un icono en la bandeja al minimizar la ventana.</translation>
</message>
<message>
<source>Minimize to the tray instead of the taskbar</source>
<translation>Minimizar a la bandeja en lugar de a la barra de tareas</translation>
</message>
<message>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimizar en lugar de cerrar la aplicación cuando se cierra la ventana. Cuando active esta opción, tendrá que cerrar la aplicación seleccionando Salir desde el menú.</translation>
</message>
<message>
<source>Minimize on close</source>
<translation>Minimizar al cerrar</translation>
</message>
<message>
<source>Display</source>
<translation>Mostrar</translation>
</message>
<message>
<source>User Interface language:</source>
<translation>Idioma de la interfaz de usuario:</translation>
</message>
<message>
<source>User Interface Theme:</source>
<translation>Tema de la Interfaz de Usuario:</translation>
</message>
<message>
<source>Unit to show amounts in:</source>
<translation>Unidad para mostrar cantidades:</translation>
</message>
<message>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Seleccionar la subdivisión a mostrar por defecto en la interfaz y al enviar monedas.</translation>
</message>
<message>
<source>Decimal digits</source>
<translation>Dígitos decimales</translation>
</message>
<message>
<source>Hide empty balances</source>
<translation>Ocultar saldos vacíos</translation>
</message>
<message>
<source>Third party URLs (e.g. a block explorer) that appear in the transactions tab as context menu items. %s in the URL is replaced by transaction hash. Multiple URLs are separated by vertical bar |.</source>
<translation>Direcciones web de terceros (p.e. un explorador de bloques) que aparecen en la pestaña de transacciones como elementos del menú contextual. %s en la dirección web es reemplazado por el identificador de la transacción. Múltiples direcciones web se separan con una barra vertical (|).</translation>
</message>
<message>
<source>Third party transaction URLs</source>
<translation>Direcciones web de transacciones de terceros</translation>
</message>
<message>
<source>Active command-line options that override above options:</source>
<translation>Opciones de línea de comando activas que anulan las opciones anteriores:</translation>
</message>
<message>
<source>Reset all client options to default.</source>
<translation>Restablecer todas las opciones del cliente a sus valores por defecto.</translation>
</message>
<message>
<source>Reset Options</source>
<translation>Restablecer Opciones</translation>
</message>
<message>
<source>OK</source>
<translation>OK</translation>
</message>
<message>
<source>Cancel</source>
<translation>Cancelar</translation>
</message>
<message>
<source>Any</source>
<translation>Cualquiera</translation>
</message>
<message>
<source>default</source>
<translation>por defecto</translation>
</message>
<message>
<source>none</source>
<translation>ninguno</translation>
</message>
<message>
<source>Confirm options reset</source>
<translation>Confirmar el restablecimiento de las opciones</translation>
</message>
<message>
<source>Client restart required to activate changes.</source>
<translation>Se requiere un reinicio del cliente para activar los cambios.</translation>
</message>
<message>
<source>Client will be shutdown, do you want to proceed?</source>
<translation>El cliente se cerrará, ¿desea continuar?</translation>
</message>
<message>
<source>This change would require a client restart.</source>
<translation>Este cambio requeriría un reinicio del cliente.</translation>
</message>
<message>
<source>The supplied proxy address is invalid.</source>
<translation>La dirección proxy indicada es inválida.</translation>
</message>
<message>
<source>The supplied proxy port is invalid.</source>
<translation>El puerto proxy suministrado no es válido.</translation>
</message>
<message>
<source>The supplied proxy settings are invalid.</source>
<translation>La configuración del proxy suministrada no es válida.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<source>Form</source>
<translation>Formulario</translation>
</message>
<message>
<source>Available:</source>
<translation>Disponible:</translation>
</message>
<message>
<source>Your current spendable balance</source>
<translation>Su balance actual disponible</translation>
</message>
<message>
<source>Total Balance, including all unavailable coins.</source>
<translation>Balance Total, incluidas todas las monedas no disponibles.</translation>
</message>
<message>
<source>GTR Balance</source>
<translation>Balance GTR</translation>
</message>
<message>
<source>Pending:</source>
<translation>Pendiente:</translation>
</message>
<message>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source>
<translation>Total de transacciones que aún deben ser confirmadas, y todavía no cuentan en el balance disponible</translation>
</message>
<message>
<source>Immature:</source>
<translation>Inmaduro:</translation>
</message>
<message>
<source>Staked or masternode rewards that has not yet matured</source>
<translation>Recompensas de participación o de masternode que aún no han madurado</translation>
</message>
<message>
<source>Current locked balance in watch-only addresses</source>
<translation>Saldo bloqueado actual en direcciones solo de lectura</translation>
</message>
<message>
<source>Your current GTR balance, unconfirmed and immature transactions included</source>
<translation>Su balance GTR actual, transacciones no confirmadas e inmaduras incluidas</translation>
</message>
<message>
<source>zGTR Balance</source>
<translation>Balance zGTR</translation>
</message>
<message>
<source>Mature: more than 20 confirmation and more than 1 mint of the same denomination after it was minted.
These zGTR are spendable.</source>
<translation>Maduro: más de 20 confirmaciones y más de 1 acuñación de la misma denominación después de ser acuñado.
Estos zGTR son gastables.</translation>
</message>
<message>
<source>Unconfirmed: less than 20 confirmations
Immature: confirmed, but less than 1 mint of the same denomination after it was minted</source>
<translation>Sin confirmar: menos de 20 confirmaciones
Inmaduro: confirmado, pero menos de 1 acuñación de la misma denominación después de ser acuñado</translation>
</message>
<message>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the GTR network after a connection is established, but this process has not completed yet.</source>
<translation>La información mostrada puede estar desactualizada. Su monedero se sincroniza automáticamente con la red GTR después de establecer una conexión, pero este proceso aún no se ha completado.</translation>
</message>
<message>
<source>OVERVIEW</source>
<translation>VISIÓN DE CONJUNTO</translation>
</message>
<message>
<source>Combined Balance (including unconfirmed and immature coins)</source>
<translation>Balance combinado (incluidas las monedas no confirmadas e inmaduras)</translation>
</message>
<message>
<source>Combined Balance</source>
<translation>Balance Combinado</translation>
</message>
<message>
<source>Unconfirmed transactions to watch-only addresses</source>
<translation>Transacciones no confirmadas a direcciones sólo de lectura</translation>
</message>
<message>
<source>Staked or masternode rewards in watch-only addresses that has not yet matured</source>
<translation>Recompensas de participación o de masternode en direcciones de sólo lectura que todavía no han madurado</translation>
</message>
<message>
<source>Total:</source>
<translation>Total:</translation>
</message>
<message>
<source>Current total balance in watch-only addresses</source>
<translation>Balance total actual en direcciones de sólo lectura</translation>
</message>
<message>
<source>Watch-only:</source>
<translation>Sólo-lectura:</translation>
</message>
<message>
<source>Your current balance in watch-only addresses</source>
<translation>Su balance actual en direcciones de sólo lectura</translation>
</message>
<message>
<source>Spendable:</source>
<translation>Disponible:</translation>
</message>
<message>
<source>Locked GTR or Masternode collaterals. These are excluded from zGTR minting.</source>
<translation>GTR bloqueados o colaterales de Masternode. Estos están excluidos de la acuñación de zGTR.</translation>
</message>
<message>
<source>Locked:</source>
<translation>Bloqueado:</translation>
</message>
<message>
<source>Unconfirmed:</source>
<translation>Sin confirmar:</translation>
</message>
<message>
<source>Your current zGTR balance, unconfirmed and immature zGTR included.</source>
<translation>Su balance zGTR actual, zGTR no confirmado e inmaduro incluidos.</translation>
</message>
<message>
<source>Recent transactions</source>
<translation>Transacciones recientes</translation>
</message>
<message>
<source>out of sync</source>
<translation>desincronizado</translation>
</message>
<message>
<source>Current percentage of zGTR.
If AutoMint is enabled this percentage will settle around the configured AutoMint percentage (default = 10%).
</source>
<translation>Porcentaje actual de zGTR.
Si AutoMint está habilitado, este porcentaje se establecerá alrededor del porcentaje de AutoMint configurado (predeterminado = 10%).
</translation>
</message>
<message>
<source>AutoMint is currently enabled and set to </source>
<translation>AutoMint está actualmente habilitado y configurado en </translation>
</message>
<message>
<source>To disable AutoMint add 'enablezeromint=0' in GTR.conf.</source>
<translation>Para desactivar AutoMint agrega 'enablezeromint=0' en GTR.conf.</translation>
</message>
<message>
<source>AutoMint is currently disabled.
To enable AutoMint change 'enablezeromint=0' to 'enablezeromint=1' in GTR.conf</source>
<translation>AutoMint está actualmente desactivado.
Para habilitar AutoMint cambie 'enablezeromint=0' a 'enablezeromint=1' en GTR.conf</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<source>Payment request error</source>
<translation>Error en la solicitud de pago</translation>
</message>
<message>
<source>URI handling</source>
<translation>Manejo de URI</translation>
</message>
<message>
<source>Payment request fetch URL is invalid: %1</source>
<translation>La URL de obtención de la solicitud de pago es inválida: %1</translation>
</message>
<message>
<source>Payment request file handling</source>
<translation>Manejo del fichero de solicitud de pago</translation>
</message>
<message>
<source>Invalid payment address %1</source>
<translation>Dirección de pago inválida %1</translation>
</message>
<message>
<source>Cannot start GTR: click-to-pay handler</source>
<translation>No se puede iniciar GTR: módulo click-to-pay</translation>
</message>
<message>
<source>URI cannot be parsed! This can be caused by an invalid GTR address or malformed URI parameters.</source>
<translation>¡El identificador de la dirección no puede ser analizado! Esto puede ser causado por una dirección GTR inválida o parámetros del identificador malformados.</translation>
</message>
<message>
<source>Payment request file cannot be read! This can be caused by an invalid payment request file.</source>
<translation>¡El fichero de solicitud de pago no se pudo leer! Esto puede estar causado por un fichero de solicitud de pago inválido.</translation>
</message>
<message>
<source>Payment request rejected</source>
<translation>Solicitud de pago rechazada</translation>
</message>
<message>
<source>Payment request network doesn't match client network.</source>
<translation>La red de la solicitud de pago no coincide con la red del cliente.</translation>
</message>
<message>
<source>Payment request has expired.</source>
<translation>La solicitud de pago ha expirado.</translation>
</message>
<message>
<source>Payment request is not initialized.</source>
<translation>La solicitud de pago no está inicializada.</translation>
</message>
<message>
<source>Unverified payment requests to custom payment scripts are unsupported.</source>
<translation>Las solicitudes de pago no verificadas a scripts de pago personalizados no están soportadas.</translation>
</message>
<message>
<source>Requested payment amount of %1 is too small (considered dust).</source>
<translation>La cantidad de pago solicitada de %1 es demasiado pequeña (se considera calderilla).</translation>
</message>
<message>
<source>Refund from %1</source>
<translation>Reembolso desde %1</translation>
</message>
<message>
<source>Payment request %1 is too large (%2 bytes, allowed %3 bytes).</source>
<translation>La solicitud de pago %1 es demasiado grande (%2 bytes, permitidos %3 bytes).</translation>
</message>
<message>
<source>Payment request DoS protection</source>
<translation>Protección de Denegación de Servicio para la solicitud de pago</translation>
</message>
<message>
<source>Error communicating with %1: %2</source>
<translation>Error de comunicación con %1: %2</translation>
</message>
<message>
<source>Payment request cannot be parsed!</source>
<translation>¡La solicitud de pago no se pudo procesar!</translation>
</message>
<message>
<source>Bad response from server %1</source>
<translation>Respuesta inadecuada del servidor %1</translation>
</message>
<message>
<source>Network request error</source>
<translation>Error de solicitud de red</translation>
</message>
<message>
<source>Payment acknowledged</source>
<translation>Pago confirmado</translation>
</message>
</context>
<context>
<name>PeerTableModel</name>
<message>
<source>Address/Hostname</source>
<translation>Dirección/Nombre Host</translation>
</message>
<message>
<source>Version</source>
<translation>Versión</translation>
</message>
<message>
<source>Ping Time</source>
<translation>Tiempo de Ping</translation>
</message>
</context>
<context>
<name>PrivacyDialog</name>
<message>
<source>Zerocoin Actions:</source>
<translation>Acciones Zerocoin:</translation>
</message>
<message>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the GTR network after a connection is established, but this process has not completed yet.</source>
<translation>La información mostrada puede estar desactualizada. Su monedero se sincroniza automáticamente con la red GTR después de establecer una conexión, pero este proceso aún no se ha completado.</translation>
</message>
<message>
<source>Mint Zerocoin</source>
<translation>Creación de moneda Zerocoin</translation>
</message>
<message>
<source>0</source>
<translation>0</translation>
</message>
<message>
<source>zGTR</source>
<translation>zGTR</translation>
</message>
<message>
<source>Available for minting are coins which are confirmed and not locked or Masternode collaterals.</source>
<translation>Las monedas disponibles para acuñar son las que están confirmadas y no bloqueadas ni son colateral de Masternode.</translation>
</message>
<message>
<source>Available for Minting:</source>
<translation>Disponible para acuñar:</translation>
</message>
<message>
<source>0.000 000 00 GTR</source>
<translation>0.000 000 00 GTR</translation>
</message>
<message>
<source>Reset Zerocoin Wallet DB. Deletes transactions that did not make it into the blockchain.</source>
<translation>Resetear la base de datos del monedero Zerocoin. Esta opción borra transacciones que no consiguieron salir publicadas en el blockchain.</translation>
</message>
<message>
<source>Reset</source>
<translation>Reiniciar</translation>
</message>
<message>
<source>Coin Control...</source>
<translation>Control de Monedas...</translation>
</message>
<message>
<source>Quantity:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Rescan the complete blockchain for Zerocoin mints and their meta-data.</source>
<translation>Reescanear el blockchain entero buscando acuñaciones Zerocoin y sus metadatos.</translation>
</message>
<message>
<source>ReScan</source>
<translation>Reescanear</translation>
</message>
<message>
<source>Status and/or Mesages from the last Mint Action.</source>
<translation>Estado y/o mensajes de la última acción de acuñación.</translation>
</message>
<message>
<source>PRIVACY</source>
<translation>PRIVACIDAD</translation>
</message>
<message>
<source>Enter an amount of GTR to convert to zGTR</source>
<translation>Introduzca la cantidad de GTR que desea convertir a zGTR</translation>
</message>
<message>
<source>zGTR Control</source>
<translation>Control zGTR</translation>
</message>
<message>
<source>zGTR Selected:</source>
<translation>zGTR Seleccionados:</translation>
</message>
<message>
<source>Quantity Selected:</source>
<translation>Cantidad Seleccionada:</translation>
</message>
<message>
<source>Spend Zerocoin. Without 'Pay To:' address creates payments to yourself.</source>
<translation>Gastar Zerocoin. Sin dirección 'Pagar A:', crea pagos a usted mismo.</translation>
</message>
<message>
<source>Spend Zerocoin</source>
<translation>Gastar Zerocoin</translation>
</message>
<message>
<source>Available (mature and spendable) zGTR for spending</source>
<translation>zGTR disponible (maduro y gastable) para gastar</translation>
</message>
<message>
<source>Available Balance:</source>
<translation>Balance disponible:</translation>
</message>
<message>
<source>Available (mature and spendable) zGTR for spending
zGTR are mature when they have more than 20 confirmations AND more than 2 mints of the same denomination after them were minted</source>
<translation>zGTR disponible (maduro y gastable) para gastar
Los zGTR son maduros cuando tienen más de 20 confirmaciones Y más de 2 acuñaciones de la misma denominación después de que fueron acuñados</translation>
</message>
<message>
<source>0 zGTR</source>
<translation>0 zGTR</translation>
</message>
<message>
<source>Security Level for Zerocoin Transactions. More is better, but needs more time and resources.</source>
<translation>Nivel de Seguridad para Transacciones Zerocoin. Cuanto más mejor, pero necesitará más tiempo y recursos.</translation>
</message>
<message>
<source>Security Level:</source>
<translation>Nivel de Seguridad:</translation>
</message>
<message>
<source>Security Level 1 - 100 (default: 42)</source>
<translation>Nivel de Seguridad 1 - 100 (por defecto: 42)</translation>
</message>
<message>
<source>Pay To:</source>
<translation>Pagar A:</translation>
</message>
<message>
<source>The GTR address to send the payment to. Creates local payment to yourself when empty.</source>
<translation>La dirección GTR a la que enviar el pago. Si se deja en blanco, crea un pago a usted mismo.</translation>
</message>
<message>
<source>Choose previously used address</source>
<translation>Escoja una dirección usada previamente</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde el portapapeles</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<source>Label:</source>
<translation>Etiqueta:</translation>
</message>
<message>
<source>Enter a label for this address to add it to the list of used addresses</source>
<translation>Introduzca una etiqueta para esta dirección para añadirla a la lista de direcciones utilizadas</translation>
</message>
<message>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Convert Change to Zerocoin (might cost additional fees)</source>
<translation>Convertir el Cambio a Zerocoin (puede costar alguna comisión adicional)</translation>
</message>
<message>
<source>If checked, the wallet tries to minimize the returning change instead of minimizing the number of spent denominations.</source>
<translation>Si está marcado, el monedero intenta minimizar el cambio devuelto en lugar de minimizar el número de denominaciones gastadas.</translation>
</message>
<message>
<source>Minimize Change</source>
<translation>Minimizar Cambio</translation>
</message>
<message>
<source>Information about the available Zerocoin funds.</source>
<translation>Información sobre los fondos Zerocoin disponibles.</translation>
</message>
<message>
<source>Zerocoin Stats:</source>
<translation>Estadísticas Zerocoin:</translation>
</message>
<message>
<source>Total Balance including unconfirmed and immature zGTR</source>
<translation>Balance total incluyendo zGTR no confirmado e inmaduro</translation>
</message>
<message>
<source>Total Zerocoin Balance:</source>
<translation>Balance total de Zerocoin:</translation>
</message>
<message>
<source>Denominations with value 1:</source>
<translation>Denominaciones con valor 1:</translation>
</message>
<message>
<source>Denom. with value 1:</source>
<translation>Denom. con valor 1:</translation>
</message>
<message>
<source>Unconfirmed: less than 20 confirmations
Immature: confirmed, but less than 1 mint of the same denomination after it was minted</source>
<translation>Sin confirmar: menos de 20 confirmaciones
Inmaduro: confirmado, pero menos de 1 acuñación de la misma denominación después de ser acuñado</translation>
</message>
<message>
<source>Show the current status of automatic zGTR minting.
To change the status (restart required):
- enable: add 'enablezeromint=1' to GTR.conf
- disable: add 'enablezeromint=0' to GTR.conf
To change the percentage (no restart required):
- menu Settings->Options->Percentage of autominted zGTR
</source>
<translation>Muestra el estado actual de la acuñación automática de zGTR.
Para cambiar el estado (reinicio requerido):
- activar: añada 'enablezeromint=1' en GTR.conf
- desactivar: añada 'enablezeromint=0' en GTR.conf
Para cambiar el porcentaje (no se requiere reiniciar):
- menú Configuración->Opciones->Porcentaje de zGTR autoacuñados
</translation>
</message>
<message>
<source>AutoMint Status</source>
<translation>Estado de AutoMint</translation>
</message>
<message>
<source>Global Supply:</source>
<translation>Suministro Global:</translation>
</message>
<message>
<source>Denom. 1:</source>
<translation>Denom. 1:</translation>
</message>
<message>
<source>Denom. 5:</source>
<translation>Denom. 5:</translation>
</message>
<message>
<source>Denom. 10:</source>
<translation>Denom. 10:</translation>
</message>
<message>
<source>Denom. 50:</source>
<translation>Denom. 50:</translation>
</message>
<message>
<source>Denom. 100:</source>
<translation>Denom. 100:</translation>
</message>
<message>
<source>Denom. 500:</source>
<translation>Denom. 500:</translation>
</message>
<message>
<source>Denom. 1000:</source>
<translation>Denom. 1000:</translation>
</message>
<message>
<source>Denom. 5000:</source>
<translation>Denom. 5000:</translation>
</message>
<message>
<source>0 x</source>
<translation>0 x</translation>
</message>
<message>
<source>Denominations with value 5:</source>
<translation>Denominaciones con valor 5:</translation>
</message>
<message>
<source>Denom. with value 5:</source>
<translation>Denom. con valor 5:</translation>
</message>
<message>
<source>Denominations with value 10:</source>
<translation>Denominaciones con valor 10:</translation>
</message>
<message>
<source>Denom. with value 10:</source>
<translation>Denom. con valor 10:</translation>
</message>
<message>
<source>Denominations with value 50:</source>
<translation>Denominaciones con valor 50:</translation>
</message>
<message>
<source>Denom. with value 50:</source>
<translation>Denom. con valor 50:</translation>
</message>
<message>
<source>Denominations with value 100:</source>
<translation>Denominaciones con valor 100:</translation>
</message>
<message>
<source>Denom. with value 100:</source>
<translation>Denom. con valor 100:</translation>
</message>
<message>
<source>Denominations with value 500:</source>
<translation>Denominaciones con valor 500:</translation>
</message>
<message>
<source>Denom. with value 500:</source>
<translation>Denom. con valor 500:</translation>
</message>
<message>
<source>Denominations with value 1000:</source>
<translation>Denominaciones con valor 1000:</translation>
</message>
<message>
<source>Denom. with value 1000:</source>
<translation>Denom. con valor 1000:</translation>
</message>
<message>
<source>Denominations with value 5000:</source>
<translation>Denominaciones con valor 5000:</translation>
</message>
<message>
<source>Denom. with value 5000:</source>
<translation>Denom. con valor 5000:</translation>
</message>
<message>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<source>TextLabel</source>
<translation>EtiquetaDeTexto</translation>
</message>
<message>
<source>Fee:</source>
<translation>Comisión:</translation>
</message>
<message>
<source>Dust:</source>
<translation>Calderilla:</translation>
</message>
<message>
<source>no</source>
<translation>no</translation>
</message>
<message>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<source>Insufficient funds!</source>
<translation>¡Fondos insuficientes!</translation>
</message>
<message>
<source>Coins automatically selected</source>
<translation>Monedas seleccionadas automáticamente</translation>
</message>
<message>
<source>medium</source>
<translation>media</translation>
</message>
<message>
<source>Coin Control Features</source>
<translation>Funciones de Control de Monedas</translation>
</message>
<message>
<source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source>
<translation>Si esto está activado, pero la dirección de cambio está vacía o es inválida, el cambio será mandado a una nueva dirección generada.</translation>
</message>
<message>
<source>Custom change address</source>
<translation>Dirección de cambio personalizada</translation>
</message>
<message>
<source>Amount After Fee:</source>
<translation>Cantidad Después de comisión:</translation>
</message>
<message>
<source>Change:</source>
<translation>Cambio:</translation>
</message>
<message>
<source>out of sync</source>
<translation>desincronizado</translation>
</message>
<message>
<source>Mint Status: Okay</source>
<translation>Estado de Creación de Moneda: Ok</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<source>Starting ResetMintZerocoin: rescanning complete blockchain, this will need up to 30 minutes depending on your hardware.
Please be patient...</source>
<translation>Ejecutando ResetMintZerocoin: reescaneando el blockchain entero, esto necesitará hasta 30 minutos dependiendo de su hardware.
Por favor espere...</translation>
</message>
<message>
<source>Spending Zerocoin.
Computationally expensive, might need several minutes depending on the selected Security Level and your hardware.
Please be patient...</source>
<translation>Gastando Zerocoin.
Esto es computacionalmente intensivo, puede necesitar varios minutos dependiendo del Nivel de Seguridad elegido y el hardware de su máquina.
Por favor tenga paciencia...</translation>
</message>
<message>
<source>) needed.
Maximum allowed: </source>
<translation>) necesario.
Máximo permitido: </translation>
</message>
<message>
<source>zGTR Spend #: </source>
<translation>Gastar zGTR #: </translation>
</message>
<message>
<source>zGTR Mint</source>
<translation>Acuñar zGTR</translation>
</message>
<message>
<source> <b>enabled</b>.</source>
<translation> <b>activado</b>.</translation>
</message>
<message>
<source> <b>disabled</b>.</source>
<translation> <b>desactivado</b>.</translation>
</message>
<message>
<source> Configured target percentage: <b></source>
<translation> Porcentaje objetivo configurado: <b></translation>
</message>
<message>
<source>zGTR is currently disabled due to maintenance.</source>
<translation>zGTR actualmente está desactivado debido al mantenimiento.</translation>
</message>
<message>
<source>zGTR is currently undergoing maintenance.</source>
<translation>zGTR se encuentra actualmente en mantenimiento.</translation>
</message>
<message>
<source>Denom. with value <b>1</b>:</source>
<translation>Denom. con valor <b>1</b>:</translation>
</message>
<message>
<source>Denom. with value <b>5</b>:</source>
<translation>Denom. con valor <b>5</b>:</translation>
</message>
<message>
<source>Denom. with value <b>10</b>:</source>
<translation>Denom. con valor <b>10</b>:</translation>
</message>
<message>
<source>Denom. with value <b>50</b>:</source>
<translation>Denom. con valor <b>50</b>:</translation>
</message>
<message>
<source>Denom. with value <b>100</b>:</source>
<translation>Denom. con valor <b>100</b>:</translation>
</message>
<message>
<source>Denom. with value <b>500</b>:</source>
<translation>Denom. con valor <b>500</b>:</translation>
</message>
<message>
<source>Denom. with value <b>1000</b>:</source>
<translation>Denom. con valor <b>1000</b>:</translation>
</message>
<message>
<source>Denom. with value <b>5000</b>:</source>
<translation>Denom. con valor <b>5000</b>:</translation>
</message>
<message>
<source>AutoMint Status:</source>
<translation>Estado de AutoMint:</translation>
</message>
<message>
<source>Denom. <b>1</b>:</source>
<translation>Denom. <b>1</b>:</translation>
</message>
<message>
<source>Denom. <b>5</b>:</source>
<translation>Denom. <b>5</b>:</translation>
</message>
<message>
<source>Denom. <b>10</b>:</source>
<translation>Denom. <b>10</b>:</translation>
</message>
<message>
<source>Denom. <b>50</b>:</source>
<translation>Denom. <b>50</b>:</translation>
</message>
<message>
<source>Denom. <b>100</b>:</source>
<translation>Denom. <b>100</b>:</translation>
</message>
<message>
<source>Denom. <b>500</b>:</source>
<translation>Denom. <b>500</b>:</translation>
</message>
<message>
<source>Denom. <b>1000</b>:</source>
<translation>Denom. <b>1000</b>:</translation>
</message>
<message>
<source>Denom. <b>5000</b>:</source>
<translation>Denom. <b>5000</b>:</translation>
</message>
<message>
<source>Error: Your wallet is locked. Please enter the wallet passphrase first.</source>
<translation>Error: Su monedero está bloqueado. Por favor, introduzca primero la frase clave del monedero.</translation>
</message>
<message>
<source>Message: Enter an amount > 0.</source>
<translation>Mensaje: Introduzca una cantidad > 0.</translation>
</message>
<message>
<source>Minting </source>
<translation>Acuñando </translation>
</message>
<message>
<source>Successfully minted </source>
<translation>Acuñado correctamente </translation>
</message>
<message>
<source> zGTR in </source>
<translation> zGTR en </translation>
</message>
<message>
<source> sec. Used denominations:
</source>
<translation> seg. Denominaciones usadas:
</translation>
</message>
<message>
<source>Duration: </source>
<translation>Duración: </translation>
</message>
<message>
<source> sec.
</source>
<translation> seg.
</translation>
</message>
<message>
<source>Starting ResetSpentZerocoin: </source>
<translation>Iniciando ResetSpentZerocoin: </translation>
</message>
<message>
<source>No 'Pay To' address provided, creating local payment</source>
<translation>No se especificó dirección 'Pagar A', por lo que se creará un pago local</translation>
</message>
<message>
<source>Invalid GTR Address</source>
<translation>Dirección GTR Inválida</translation>
</message>
<message>
<source>Invalid Send Amount</source>
<translation>Cantidad a Enviar Inválida</translation>
</message>
<message>
<source>Confirm additional Fees</source>
<translation>Confirmar comisiones adicionales</translation>
</message>
<message>
<source>Are you sure you want to send?<br /><br /></source>
<translation>¿Está seguro que desea enviar?<br /><br /></translation>
</message>
<message>
<source> to address </source>
<translation> a la dirección </translation>
</message>
<message>
<source> to a newly generated (unused and therefore anonymous) local address <br /></source>
<translation> a una dirección local recién generada (no utilizada y, por lo tanto, anónima)<br /></translation>
</message>
<message>
<source>with Security Level </source>
<translation>con Nivel de Seguridad </translation>
</message>
<message>
<source>Confirm send coins</source>
<translation>Confirmar enviar monedas</translation>
</message>
<message>
<source>Version 1 zGTR require a security level of 100 to successfully spend.</source>
<translation>Los zGTR de versión 1 requieren un nivel de seguridad de 100 para poder gastarse.</translation>
</message>
<message>
<source>Failed to spend zGTR</source>
<translation>Error al gastar zGTR</translation>
</message>
<message>
<source>Failed to fetch mint associated with serial hash</source>
<translation>Error al obtener la acuñación asociada al hash del serial</translation>
</message>
<message>
<source>Too much inputs (</source>
<translation>Demasiadas entradas (</translation>
</message>
<message>
<source>
Either mint higher denominations (so fewer inputs are needed) or reduce the amount to spend.</source>
<translation>
O acuñe denominaciones más altas (de modo que se necesiten menos entradas) o reduzca la cantidad a gastar.</translation>
</message>
<message>
<source>Spend Zerocoin failed with status = </source>
<translation>El gasto de Zerocoin falló con estado = </translation>
</message>
<message numerus="yes">
<source>PrivacyDialog</source>
<comment>Enter an amount of GTR to convert to zGTR</comment>
<translation><numerusform>PrivacyDialog</numerusform><numerusform>PrivacyDialog</numerusform></translation>
</message>
<message>
<source>denomination: </source>
<translation>denominación: </translation>
</message>
<message>
<source>serial: </source>
<translation>serial: </translation>
</message>
<message>
<source>Spend is 1 of : </source>
<translation>Gasto 1 de : </translation>
</message>
<message>
<source>value out: </source>
<translation>valor de salida: </translation>
</message>
<message>
<source>address: </source>
<translation>dirección: </translation>
</message>
<message>
<source>Sending successful, return code: </source>
<translation>Envío correcto, código devuelto: </translation>
</message>
<message>
<source>txid: </source>
<translation>txid: </translation>
</message>
<message>
<source>fee: </source>
<translation>comisión: </translation>
</message>
</context>
<context>
<name>QObject</name>
<message>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<source>Enter a GTR address (e.g. %1)</source>
<translation>Introduzca una dirección GTR (p.e. %1)</translation>
</message>
<message>
<source>%1 d</source>
<translation>%1 d</translation>
</message>
<message>
<source>%1 h</source>
<translation>%1 h</translation>
</message>
<message>
<source>%1 m</source>
<translation>%1 m</translation>
</message>
<message>
<source>%1 s</source>
<translation>%1 s</translation>
</message>
<message>
<source>NETWORK</source>
<translation>RED</translation>
</message>
<message>
<source>BLOOM</source>
<translation>BLOOM</translation>
</message>
<message>
<source>UNKNOWN</source>
<translation>DESCONOCIDO</translation>
</message>
<message>
<source>None</source>
<translation>Ninguno</translation>
</message>
<message>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
<source>%1 ms</source>
<translation>%1 ms</translation>
</message>
</context>
<context>
<name>QRImageWidget</name>
<message>
<source>Save Image...</source>
<translation>Guardar Imagen...</translation>
</message>
<message>
<source>Copy Image</source>
<translation>Copiar Imagen</translation>
</message>
<message>
<source>Save QR Code</source>
<translation>Guardar Código QR</translation>
</message>
<message>
<source>PNG Image (*.png)</source>
<translation>Imagen PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<source>Tools window</source>
<translation>Ventana de herramientas</translation>
</message>
<message>
<source>Information</source>
<translation>Información</translation>
</message>
<message>
<source>General</source>
<translation>General</translation>
</message>
<message>
<source>Name</source>
<translation>Nombre</translation>
</message>
<message>
<source>Client name</source>
<translation>Nombre de cliente</translation>
</message>
<message>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
<source>Number of connections</source>
<translation>Número de conexiones</translation>
</message>
<message>
<source>Open</source>
<translation>Abrir</translation>
</message>
<message>
<source>Startup time</source>
<translation>Hora de inicio</translation>
</message>
<message>
<source>Network</source>
<translation>Red</translation>
</message>
<message>
<source>Last block time</source>
<translation>Momento del último bloque</translation>
</message>
<message>
<source>Debug log file</source>
<translation>Fichero de depuración</translation>
</message>
<message>
<source>Using OpenSSL version</source>
<translation>Usando la versión OpenSSL</translation>
</message>
<message>
<source>Build date</source>
<translation>Fecha de compilación</translation>
</message>
<message>
<source>Current number of blocks</source>
<translation>Número actual de bloques</translation>
</message>
<message>
<source>Client version</source>
<translation>Versión del cliente</translation>
</message>
<message>
<source>Using BerkeleyDB version</source>
<translation>Usando BerkeleyDB versión</translation>
</message>
<message>
<source>Block chain</source>
<translation>Blockchain</translation>
</message>
<message>
<source>Open the GTR debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Abrir el fichero de depuración GTR en el directorio actual de datos. Esto puede llevar unos segundos para ficheros de depuración grandes.</translation>
</message>
<message>
<source>Number of Masternodes</source>
<translation>Número de Masternodes</translation>
</message>
<message>
<source>Console</source>
<translation>Consola</translation>
</message>
<message>
<source>Clear console</source>
<translation>Limpiar consola</translation>
</message>
<message>
<source>Network Traffic</source>
<translation>Tráfico de Red</translation>
</message>
<message>
<source>Clear</source>
<translation>Limpiar</translation>
</message>
<message>
<source>Totals</source>
<translation>Totales</translation>
</message>
<message>
<source>Received</source>
<translation>Recibidos</translation>
</message>
<message>
<source>Sent</source>
<translation>Enviados</translation>
</message>
<message>
<source>Peers</source>
<translation>Peers</translation>
</message>
<message>
<source>Banned peers</source>
<translation>Peers prohibidos</translation>
</message>
<message>
<source>Select a peer to view detailed information.</source>
<translation>Seleccione un peer para ver información detallada.</translation>
</message>
<message>
<source>Whitelisted</source>
<translation>En lista blanca</translation>
</message>
<message>
<source>Direction</source>
<translation>Dirección</translation>
</message>
<message>
<source>Protocol</source>
<translation>Protocolo</translation>
</message>
<message>
<source>Version</source>
<translation>Versión</translation>
</message>
<message>
<source>Services</source>
<translation>Servicios</translation>
</message>
<message>
<source>Ban Score</source>
<translation>Puntuación de Baneo</translation>
</message>
<message>
<source>Connection Time</source>
<translation>Tiempo de Conexión</translation>
</message>
<message>
<source>Last Send</source>
<translation>Último Envío</translation>
</message>
<message>
<source>Last Receive</source>
<translation>Última Recepción</translation>
</message>
<message>
<source>Bytes Sent</source>
<translation>Bytes Enviados</translation>
</message>
<message>
<source>Bytes Received</source>
<translation>Bytes Recibidos</translation>
</message>
<message>
<source>Ping Time</source>
<translation>Tiempo de Ping</translation>
</message>
<message>
<source>Wallet Repair</source>
<translation>Reparar Monedero</translation>
</message>
<message>
<source>Delete local Blockchain Folders</source>
<translation>Eliminar directorios locales de Blockchain</translation>
</message>
<message>
<source>Wallet In Use:</source>
<translation>Monedero En Uso:</translation>
</message>
<message>
<source>Starting Block</source>
<translation>Bloque inicial</translation>
</message>
<message>
<source>Synced Headers</source>
<translation>Encabezados sincronizados</translation>
</message>
<message>
<source>Synced Blocks</source>
<translation>Bloques sincronizados</translation>
</message>
<message>
<source>The duration of a currently outstanding ping.</source>
<translation>La duración de un ping actualmente pendiente.</translation>
</message>
<message>
<source>Ping Wait</source>
<translation>Espera de Ping</translation>
</message>
<message>
<source>Time Offset</source>
<translation>Desplazamiento de tiempo</translation>
</message>
<message>
<source>Custom Backup Path:</source>
<translation>Ruta personalizada de la copia de seguridad:</translation>
</message>
<message>
<source>Custom zGTR Backup Path:</source>
<translation>Ruta personalizada de la copia de seguridad zGTR:</translation>
</message>
<message>
<source>Custom Backups Threshold:</source>
<translation>Límite de copias de seguridad personalizadas:</translation>
</message>
<message>
<source>Salvage wallet</source>
<translation>Rescatar monedero</translation>
</message>
<message>
<source>Attempt to recover private keys from a corrupt wallet.dat.</source>
<translation>Intentar recuperar las llaves privadas de un archivo wallet.dat corrupto.</translation>
</message>
<message>
<source>Rescan blockchain files</source>
<translation>Reescanear ficheros del blockchain</translation>
</message>
<message>
<source>Rescan the block chain for missing wallet transactions.</source>
<translation>Reescanear el blockchain buscando transacciones que faltan en el monedero.</translation>
</message>
<message>
<source>Recover transactions 1</source>
<translation>Recuperar transacciones 1</translation>
</message>
<message>
<source>Recover transactions from blockchain (keep meta-data, e.g. account owner).</source>
<translation>Recuperar transacciones del blockchain (mantener metadatos, p.e. propietario de la cuenta).</translation>
</message>
<message>
<source>Recover transactions 2</source>
<translation>Recuperar transacciones 2</translation>
</message>
<message>
<source>Recover transactions from blockchain (drop meta-data).</source>
<translation>Recuperar transacciones del blockchain (descartar metadatos).</translation>
</message>
<message>
<source>Upgrade wallet format</source>
<translation>Actualizar formato del monedero</translation>
</message>
<message>
<source>Rebuild block chain index from current blk000??.dat files.</source>
<translation>Reconstruir el índice del blockchain desde los archivos blk000??.dat actuales.</translation>
</message>
<message>
<source>-resync:</source>
<translation>-resync:</translation>
</message>
<message>
<source>Deletes all local blockchain folders so the wallet synchronizes from scratch.</source>
<translation>Elimina todos los directorios locales del blockchain para que el monedero se sincronice desde cero.</translation>
</message>
<message>
<source>The buttons below will restart the wallet with command-line options to repair the wallet, fix issues with corrupt blockhain files or missing/obsolete transactions.</source>
<translation>Los botones de abajo reiniciarán el monedero con opciones de línea de comandos para repararlo y solucionar problemas con archivos del blockchain corruptos o transacciones perdidas u obsoletas.</translation>
</message>
<message>
<source>Wallet repair options.</source>
<translation>Opciones de reparación de monedero.</translation>
</message>
<message>
<source>Upgrade wallet to latest format on startup. (Note: this is NOT an update of the wallet itself!)</source>
<translation>Actualizar el monedero al último formato en el arranque. (Nota: ¡esto NO es una actualización del monedero en sí!)</translation>
</message>
<message>
<source>Rebuild index</source>
<translation>Reconstruir índice</translation>
</message>
<message>
<source>In:</source>
<translation>Entrada:</translation>
</message>
<message>
<source>Out:</source>
<translation>Salida:</translation>
</message>
<message>
<source>Welcome to the GTR RPC console.</source>
<translation>Bienvenido a la consola RPC de GTR.</translation>
</message>
<message>
<source>Disconnect Node</source>
<translation>Desconectar nodo</translation>
</message>
<message>
<source>Ban Node for</source>
<translation>Prohibir nodo por</translation>
</message>
<message>
<source>1 hour</source>
<translation>1 hora</translation>
</message>
<message>
<source>1 day</source>
<translation>1 día</translation>
</message>
<message>
<source>1 week</source>
<translation>1 semana</translation>
</message>
<message>
<source>1 year</source>
<translation>1 año</translation>
</message>
<message>
<source>Unban Node</source>
<translation>Quitar prohibición al nodo</translation>
</message>
<message>
<source>This will delete your local blockchain folders and the wallet will synchronize the complete Blockchain from scratch.<br /><br /></source>
<translation>Esto eliminará las carpetas de blockchain locales y el monedero sincronizará la cadena de bloques completa desde cero.<br /><br /></translation>
</message>
<message>
<source>This needs quite some time and downloads a lot of data.<br /><br /></source>
<translation>Esto requiere bastante tiempo y descarga una gran cantidad de datos.<br /><br /></translation>
</message>
<message>
<source>Your transactions and funds will be visible again after the download has completed.<br /><br /></source>
<translation>Sus transacciones y fondos serán visibles nuevamente después de que se haya completado la descarga.<br /><br /></translation>
</message>
<message>
<source>Do you want to continue?.<br /></source>
<translation>¿Desea continuar?.<br /></translation>
</message>
<message>
<source>Confirm resync Blockchain</source>
<translation>Confirmar resincronización del Blockchain</translation>
</message>
<message>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Use las teclas arriba y abajo para navegar por el historial, y <b>Ctrl-L</b> para limpiar la pantalla.</translation>
</message>
<message>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Escriba <b>help</b> para ver una lista de posibles comandos.</translation>
</message>
<message>
<source>%1 B</source>
<translation>%1 B</translation>
</message>
<message>
<source>%1 KB</source>
<translation>%1 KB</translation>
</message>
<message>
<source>%1 MB</source>
<translation>%1 MB</translation>
</message>
<message>
<source>%1 GB</source>
<translation>%1 GB</translation>
</message>
<message>
<source>(node id: %1)</source>
<translation>(node id: %1)</translation>
</message>
<message>
<source>via %1</source>
<translation>vía %1</translation>
</message>
<message>
<source>never</source>
<translation>nunca</translation>
</message>
<message>
<source>Inbound</source>
<translation>Entrantes</translation>
</message>
<message>
<source>Outbound</source>
<translation>Salientes</translation>
</message>
<message>
<source>Yes</source>
<translation>Sí</translation>
</message>
<message>
<source>No</source>
<translation>No</translation>
</message>
<message>
<source>Unknown</source>
<translation>Desconocido</translation>
</message>
</context>
<context>
<name>ReceiveCoinsDialog</name>
<message>
<source>Reuse one of the previously used receiving addresses.<br>Reusing addresses has security and privacy issues.<br>Do not use this unless re-generating a payment request made before.</source>
<translation>Reutilizar una de las direcciones de recepción usadas anteriormente.<br>Reutilizar direcciones tiene problemas de seguridad y privacidad.<br>No la utilice a menos que esté regenerando una solicitud de pago anterior.</translation>
</message>
<message>
<source>Reuse an existing receiving address (not recommended)</source>
<translation>Reutilizar una dirección de recepción existente (no recomendado)</translation>
</message>
<message>
<source>Message:</source>
<translation>Mensaje:</translation>
</message>
<message>
<source>An optional label to associate with the new receiving address.</source>
<translation>Una etiqueta opcional a asociar con la nueva dirección de recepción.</translation>
</message>
<message>
<source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the GTR network.</source>
<translation>Un mensaje opcional a adjuntar a la solicitud de pago, que será mostrado cuando se abra la solicitud. Nota: El mensaje no se envía junto al pago por la red GTR.</translation>
</message>
<message>
<source>RECEIVE</source>
<translation>RECIBIR</translation>
</message>
<message>
<source>An optional message to attach to the payment request, which will be displayed when the request is opened.<br>Note: The message will not be sent with the payment over the GTR network.</source>
<translation>Un mensaje opcional a adjuntar a la solicitud de pago, que será mostrado cuando se abra la solicitud. <br>Nota: El mensaje no se envía junto al pago por la red GTR.</translation>
</message>
<message>
<source>Use this form to request payments. All fields are <b>optional</b>.</source>
<translation>Use este formulario para solicitar pagos. Todos los campos son <b>opcionales</b>.</translation>
</message>
<message>
<source>Label:</source>
<translation>Etiqueta:</translation>
</message>
<message>
<source>An optional amount to request. Leave this empty or zero to not request a specific amount.</source>
<translation>Una cantidad opcional a solicitar. Deje esto vacío o en cero para no pedir una cantidad específica.</translation>
</message>
<message>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Request payment</source>
<translation>Solicitar pago</translation>
</message>
<message>
<source>Clear all fields of the form.</source>
<translation>Limpiar todos los campos del formulario.</translation>
</message>
<message>
<source>Clear</source>
<translation>Limpiar</translation>
</message>
<message>
<source>Requested payments history</source>
<translation>Historial de peticiones de pago</translation>
</message>
<message>
<source>Show the selected request (does the same as double clicking an entry)</source>
<translation>Mostrar la solicitud seleccionada (lo mismo que hacer doble click en una entrada)</translation>
</message>
<message>
<source>Show</source>
<translation>Mostrar</translation>
</message>
<message>
<source>Remove the selected entries from the list</source>
<translation>Quitar las entradas seleccionadas de la lista</translation>
</message>
<message>
<source>Remove</source>
<translation>Quitar</translation>
</message>
<message>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<source>Copy message</source>
<translation>Copiar mensaje</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copiar cantidad</translation>
</message>
</context>
<context>
<name>ReceiveRequestDialog</name>
<message>
<source>QR Code</source>
<translation>Código QR</translation>
</message>
<message>
<source>Copy URI</source>
<translation>Copiar Identificador</translation>
</message>
<message>
<source>Copy Address</source>
<translation>Copiar Dirección</translation>
</message>
<message>
<source>Save Image...</source>
<translation>Guardar Imagen...</translation>
</message>
<message>
<source>Request payment to %1</source>
<translation>Solicitar pago a %1</translation>
</message>
<message>
<source>Payment information</source>
<translation>Información de pago</translation>
</message>
<message>
<source>URI</source>
<translation>URI (identificador de recurso)</translation>
</message>
<message>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>Message</source>
<translation>Mensaje</translation>
</message>
<message>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>El identificador de recurso es demasiado largo, intente reducir el texto para la etiqueta / mensaje.</translation>
</message>
<message>
<source>Error encoding URI into QR Code.</source>
<translation>Error codificando el identificador de recurso dentro del código QR.</translation>
</message>
</context>
<context>
<name>RecentRequestsTableModel</name>
<message>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>Message</source>
<translation>Mensaje</translation>
</message>
<message>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<source>(no message)</source>
<translation>(sin mensaje)</translation>
</message>
<message>
<source>(no amount)</source>
<translation>(sin cantidad)</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<source>Send Coins</source>
<translation>Enviar Monedas</translation>
</message>
<message>
<source>SEND</source>
<translation>ENVIAR</translation>
</message>
<message>
<source>Coin Control Features</source>
<translation>Funciones de Control de Monedas</translation>
</message>
<message>
<source>Insufficient funds!</source>
<translation>¡Fondos insuficientes!</translation>
</message>
<message>
<source>Quantity:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<source>medium</source>
<translation>media</translation>
</message>
<message>
<source>Fee:</source>
<translation>Comisión:</translation>
</message>
<message>
<source>Dust:</source>
<translation>Calderilla:</translation>
</message>
<message>
<source>no</source>
<translation>no</translation>
</message>
<message>
<source>After Fee:</source>
<translation>Después de Comisión:</translation>
</message>
<message>
<source>Change:</source>
<translation>Cambio:</translation>
</message>
<message>
<source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source>
<translation>Si esto está activado, pero la dirección de cambio está vacía o es inválida, el cambio será mandado a una nueva dirección generada.</translation>
</message>
<message>
<source>Custom change address</source>
<translation>Dirección de cambio personalizada</translation>
</message>
<message>
<source>Split UTXO</source>
<translation>Partir UTXO</translation>
</message>
<message>
<source># of outputs</source>
<translation># de salidas</translation>
</message>
<message>
<source>UTXO Size:</source>
<translation>Tamaño de UTXO:</translation>
</message>
<message>
<source>0 GTR</source>
<translation>0 GTR</translation>
</message>
<message>
<source>Transaction Fee:</source>
<translation>Comisión de transacción:</translation>
</message>
<message>
<source>Choose...</source>
<translation>Elegir...</translation>
</message>
<message>
<source>collapse fee-settings</source>
<translation>contraer los ajustes de comisión</translation>
</message>
<message>
<source>Minimize</source>
<translation>Minimizar</translation>
</message>
<message>
<source>per kilobyte</source>
<translation>por kilobyte</translation>
</message>
<message>
<source>total at least</source>
<translation>total al menos</translation>
</message>
<message>
<source>(read the tooltip)</source>
<translation>(lea la información emergente)</translation>
</message>
<message>
<source>Custom:</source>
<translation>Personalizado:</translation>
</message>
<message>
<source>(Smart fee not initialized yet. This usually takes a few blocks...)</source>
<translation>(La comisión automática no se ha inicializado todavía. Esto normalmente necesita unos cuantos bloques...)</translation>
</message>
<message>
<source>SwiftX</source>
<translation>SwiftX</translation>
</message>
<message>
<source>Confirmation time:</source>
<translation>Tiempo de confirmación:</translation>
</message>
<message>
<source>Open Coin Control...</source>
<translation>Abrir Control de Monedas...</translation>
</message>
<message>
<source>Coins automatically selected</source>
<translation>Monedas seleccionadas automáticamente</translation>
</message>
<message>
<source>If the custom fee is set to 1000 uGTRs and the transaction is only 250 bytes, then "per kilobyte" only pays 250 uGTRs in fee,<br />while "at least" pays 1000 uGTRs. For transactions bigger than a kilobyte both pay by kilobyte.</source>
<translation>Si la comisión personalizada se fija en 1000 uGTRs y la transacción necesita sólo 250 bytes, entonces la opción "por kilobyte" sólo pagará 250 uGTRs de comisión,<br/>mientras "por lo menos" pagará 1000 uGTRs. Para transacciones que midan más de un kilobyte ambas pagan por kilobyte.</translation>
</message>
<message>
<source>If the custom fee is set to 1000 uGTRs and the transaction is only 250 bytes, then "per kilobyte" only pays 250 uGTRs in fee,<br />while "total at least" pays 1000 uGTRs. For transactions bigger than a kilobyte both pay by kilobyte.</source>
<translation>Si la comisión personalizada se fija en 1000 uGTRs y la transacción necesita sólo 250 bytes, entonces la opción "por kilobyte" sólo pagará 250 uGTRs de comisión,<br/>mientras "total al menos" pagará 1000 uGTRs. Para transacciones que midan más de un kilobyte ambas pagan por kilobyte.</translation>
</message>
<message>
<source>Paying only the minimum fee is just fine as long as there is less transaction volume than space in the blocks.<br />But be aware that this can end up in a never confirming transaction once there is more demand for GTR transactions than the network can process.</source>
<translation>Pagar sólo la comisión mínima está bien mientras haya menos volumen de transacciones que espacio en los bloques.<br/>Pero tenga en cuenta que esto podría acabar en una transacción que nunca se confirme si hay más demanda de transacciones GTR de las que la red puede procesar.</translation>
</message>
<message>
<source>normal</source>
<translation>normal</translation>
</message>
<message>
<source>fast</source>
<translation>rápido</translation>
</message>
<message>
<source>Recommended</source>
<translation>Recomendada</translation>
</message>
<message>
<source>Send as zero-fee transaction if possible</source>
<translation>Enviar como transacción sin comisiones si es posible</translation>
</message>
<message>
<source>(confirmation may take longer)</source>
<translation>(la confirmación puede tardar más)</translation>
</message>
<message>
<source>Confirm the send action</source>
<translation>Confirmar la acción de enviar</translation>
</message>
<message>
<source>Send</source>
<translation>Enviar</translation>
</message>
<message>
<source>Clear all fields of the form.</source>
<translation>Limpiar todos los campos del formulario.</translation>
</message>
<message>
<source>Clear All</source>
<translation>Limpiar Todo</translation>
</message>
<message>
<source>Send to multiple recipients at once</source>
<translation>Enviar a varios destinatarios al mismo tiempo</translation>
</message>
<message>
<source>Add Recipient</source>
<translation>Añadir Destinatario</translation>
</message>
<message>
<source>Anonymized GTR</source>
<translation>GTR anonimizados</translation>
</message>
<message>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<source>Copy quantity</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copiar importe</translation>
</message>
<message>
<source>Copy fee</source>
<translation>Copiar comisión</translation>
</message>
<message>
<source>Copy after fee</source>
<translation>Copiar después de comisión</translation>
</message>
<message>
<source>Copy bytes</source>
<translation>Copiar bytes</translation>
</message>
<message>
<source>Copy priority</source>
<translation>Copiar prioridad</translation>
</message>
<message>
<source>Copy dust</source>
<translation>Copiar calderilla</translation>
</message>
<message>
<source>Copy change</source>
<translation>Copiar cambio</translation>
</message>
<message>
<source>The split block tool does not work when sending to outside addresses. Try again.</source>
<translation>La herramienta de división de bloques no funciona cuando se envía a direcciones exteriores. Inténtelo otra vez.</translation>
</message>
<message>
<source>The split block tool does not work with multiple addresses. Try again.</source>
<translation>La herramienta de división de bloques no funciona con varias direcciones. Inténtelo otra vez.</translation>
</message>
<message>
<source>Warning: Invalid GTR address</source>
<translation>Advertencia: Dirección GTR inválida</translation>
</message>
<message>
<source>%1 to %2</source>
<translation>%1 a %2</translation>
</message>
<message>
<source>Are you sure you want to send?</source>
<translation>¿Está seguro de querer enviar?</translation>
</message>
<message>
<source>are added as transaction fee</source>
<translation>son añadidos como comisión de transacción</translation>
</message>
<message>
<source>Total Amount = <b>%1</b><br />= %2</source>
<translation>Cantidad total = <b>%1</b><br />= %2</translation>
</message>
<message>
<source>Confirm send coins</source>
<translation>Confirmar enviar monedas</translation>
</message>
<message>
<source>A fee %1 times higher than %2 per kB is considered an insanely high fee.</source>
<translation>Una comisión %1 veces más alta que %2 por kB se considera exageradamente alta.</translation>
</message>
<message numerus="yes">
<source>Estimated to begin confirmation within %n block(s).</source>
<translation><numerusform>Estimamos que empezará la confirmación en %n bloque.</numerusform><numerusform>Estimamos que empezará la confirmación en %n bloques.</numerusform></translation>
</message>
<message>
<source>The recipient address is not valid, please recheck.</source>
<translation>La dirección de destino no es válida, por favor compruébelo de nuevo.</translation>
</message>
<message>
<source>using SwiftX</source>
<translation>usando SwiftX</translation>
</message>
<message>
<source> split into %1 outputs using the UTXO splitter.</source>
<translation> separado en %1 salidas usando el separador UTXO.</translation>
</message>
<message>
<source><b>(%1 of %2 entries displayed)</b></source>
<translation><b>(%1 de %2 entradas mostradas)</b></translation>
</message>
<message>
<source>The amount to pay must be larger than 0.</source>
<translation>La cantidad a pagar debe ser mayor de 0.</translation>
</message>
<message>
<source>The amount exceeds your balance.</source>
<translation>La cantidad excede su saldo.</translation>
</message>
<message>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>El total excede su saldo cuando se incluye la comisión de transacción de %1.</translation>
</message>
<message>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Hemos encontrado una dirección duplicada, sólo podemos enviar a cada dirección una vez por envío.</translation>
</message>
<message>
<source>Transaction creation failed!</source>
<translation>¡Fallo al crear la transacción!</translation>
</message>
<message>
<source>The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>¡La transacción fue rechazada! Esto puede ocurrir si alguna de sus monedas ya se gastó con anterioridad, por ejemplo, si estuvo usted usando una copia de su monedero wallet.dat y gastó allí monedas pero no las marcó como gastadas aquí.</translation>
</message>
<message>
<source>Error: The wallet was unlocked only to anonymize coins.</source>
<translation>Error: El monedero se desbloqueó sólo para anonimizar monedas.</translation>
</message>
<message>
<source>Error: The wallet was unlocked only to anonymize coins. Unlock canceled.</source>
<translation>Error: El monedero se desbloqueó sólo para anonimizar monedas. Desbloqueo cancelado.</translation>
</message>
<message>
<source>Pay only the minimum fee of %1</source>
<translation>Pagar sólo la comisión mínima de %1</translation>
</message>
<message>
<source>Warning: Unknown change address</source>
<translation>Advertencia: Dirección de cambio desconocida</translation>
</message>
<message>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<source>This is a normal payment.</source>
<translation>Esto es un pago normal.</translation>
</message>
<message>
<source>Pay To:</source>
<translation>Pagar A:</translation>
</message>
<message>
<source>The GTR address to send the payment to</source>
<translation>La dirección GTR a la cual enviar el pago</translation>
</message>
<message>
<source>Choose previously used address</source>
<translation>Escoja una dirección usada previamente</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde el portapapeles</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<source>Remove this entry</source>
<translation>Quitar esta entrada</translation>
</message>
<message>
<source>Label:</source>
<translation>Etiqueta:</translation>
</message>
<message>
<source>Enter a label for this address to add it to the list of used addresses</source>
<translation>Introduzca una etiqueta para esta dirección para añadirla a la lista de direcciones utilizadas</translation>
</message>
<message>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<source>Message:</source>
<translation>Mensaje:</translation>
</message>
<message>
<source>A message that was attached to the GTR: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the GTR network.</source>
<translation>Un mensaje que se adjuntó al URI GTR: y que se guardará junto con la transacción para su referencia. Nota: Este mensaje no se enviará por la red GTR.</translation>
</message>
<message>
<source>This is an unverified payment request.</source>
<translation>Esta es una solicitud de pago no verificada.</translation>
</message>
<message>
<source>Pay To:</source>
<translation>Pagar A:</translation>
</message>
<message>
<source>Memo:</source>
<translation>Texto libre:</translation>
</message>
<message>
<source>This is a verified payment request.</source>
<translation>Esta es una solicitud de pago verificada.</translation>
</message>
<message>
<source>Enter a label for this address to add it to your address book</source>
<translation>Introduzca una etiqueta para esta dirección para añadirla a su libreta de direcciones</translation>
</message>
</context>
<context>
<name>ShutdownWindow</name>
<message>
<source>GTR Core is shutting down...</source>
<translation>El programa GTR se está cerrando...</translation>
</message>
<message>
<source>Do not shut down the computer until this window disappears.</source>
<translation>No apague el equipo hasta que esta ventana desaparezca.</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<source>Signatures - Sign / Verify a Message</source>
<translation>Firmas - Firmar / Verificar un Mensaje</translation>
</message>
<message>
<source>Sign Message</source>
<translation>Firmar Mensaje</translation>
</message>
<message>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Usted puede firmar mensajes con sus direcciones para demostrar que es su propietario. Tenga cuidado de no firmar nada vago, ya que los ataques de phishing pueden intentar engañarle para que les firme su identidad. Firme únicamente declaraciones totalmente detalladas con las que esté de acuerdo.</translation>
</message>
<message>
<source>The GTR address to sign the message with</source>
<translation>La dirección GTR con la que desee firmar el mensaje</translation>
</message>
<message>
<source>Choose previously used address</source>
<translation>Escoja una dirección usada previamente</translation>
</message>
<message>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde el portapapeles</translation>
</message>
<message>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<source>Enter the message you want to sign here</source>
<translation>Introduzca el mensaje que quiere firmar aquí</translation>
</message>
<message>
<source>Signature</source>
<translation>Firma</translation>
</message>
<message>
<source>Copy the current signature to the system clipboard</source>
<translation>Copiar la firma actual al portapapeles del sistema</translation>
</message>
<message>
<source>Sign the message to prove you own this GTR address</source>
<translation>Firme el mensaje para demostrar que es usted el propietario de esta dirección GTR</translation>
</message>
<message>
<source>The GTR address the message was signed with</source>
<translation>La dirección GTR con la que se firmó el mensaje</translation>
</message>
<message>
<source>Verify the message to ensure it was signed with the specified GTR address</source>
<translation>Verifica el mensaje para asegurar que fue firmado con la dirección GTR especificada</translation>
</message>
<message>
<source>Sign Message</source>
<translation>Firmar Mensaje</translation>
</message>
<message>
<source>Reset all sign message fields</source>
<translation>Limpiar todos los campos de firma de mensaje</translation>
</message>
<message>
<source>Clear All</source>
<translation>Limpiar Todo</translation>
</message>
<message>
<source>Verify Message</source>
<translation>Verificar Mensaje</translation>
</message>
<message>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Introduzca debajo la dirección de firma, el mensaje (asegúrese de copiar exactamente los saltos de línea, espacios, tabuladores, etc.) y la firma para verificar el mensaje. Tenga cuidado de no leer en la firma más de lo que está en el propio mensaje firmado, para evitar ser engañado por un ataque de intermediario ("man-in-the-middle").</translation>
</message>
<message>
<source>Verify Message</source>
<translation>Verificar Mensaje</translation>
</message>
<message>
<source>Reset all verify message fields</source>
<translation>Limpiar todos los campos de verificación de mensaje</translation>
</message>
<message>
<source>Click "Sign Message" to generate signature</source>
<translation>Haga clic en "Firmar Mensaje" para generar la firma</translation>
</message>
<message>
<source>The entered address is invalid.</source>
<translation>La dirección introducida es inválida.</translation>
</message>
<message>
<source>Please check the address and try again.</source>
<translation>Por favor compruebe la dirección e inténtelo de nuevo.</translation>
</message>
<message>
<source>The entered address does not refer to a key.</source>
<translation>La dirección introducida no se refiere a ninguna clave.</translation>
</message>
<message>
<source>Wallet unlock was cancelled.</source>
<translation>El desbloqueo del monedero fue cancelado.</translation>
</message>
<message>
<source>Private key for the entered address is not available.</source>
<translation>La clave privada para la dirección introducida no está disponible.</translation>
</message>
<message>
<source>Message signing failed.</source>
<translation>La firma del mensaje falló.</translation>
</message>
<message>
<source>Message signed.</source>
<translation>Mensaje firmado.</translation>
</message>
<message>
<source>The signature could not be decoded.</source>
<translation>La firma no pudo ser decodificada.</translation>
</message>
<message>
<source>Please check the signature and try again.</source>
<translation>Por favor compruebe la firma e inténtelo otra vez.</translation>
</message>
<message>
<source>The signature did not match the message digest.</source>
<translation>La firma no coincide con el resumen del mensaje.</translation>
</message>
<message>
<source>Message verification failed.</source>
<translation>Falló la verificación del mensaje.</translation>
</message>
<message>
<source>Message verified.</source>
<translation>Mensaje verificado.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<source>GTR Core</source>
<translation>GTR Core</translation>
</message>
<message>
<source>Version %1</source>
<translation>Versión %1</translation>
</message>
<message>
<source>The Bitcoin Core developers</source>
<translation>Los desarrolladores de Bitcoin Core</translation>
</message>
<message>
<source>The Dash Core developers</source>
<translation>Los desarrolladores de Dash Core</translation>
</message>
<message>
<source>The GTR Core developers</source>
<translation>Los desarrolladores de GTR Core</translation>
</message>
<message>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<source>KB/s</source>
<translation>KB/s</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message numerus="yes">
<source>Open for %n more block(s)</source>
<translation><numerusform>Abierto para %n bloque más</numerusform><numerusform>Abierto para %n bloques más</numerusform></translation>
</message>
<message>
<source>Open until %1</source>
<translation>Abierto hasta %1</translation>
</message>
<message>
<source>conflicted</source>
<translation>en conflicto</translation>
</message>
<message>
<source>%1/offline</source>
<translation>%1/offline</translation>
</message>
<message>
<source>%1/unconfirmed</source>
<translation>%1/sin confirmar</translation>
</message>
<message>
<source>%1 confirmations</source>
<translation>%1 confirmaciones</translation>
</message>
<message>
<source>%1/offline (verified via SwiftX)</source>
<translation>%1/offline (verificado vía SwiftX)</translation>
</message>
<message>
<source>%1/confirmed (verified via SwiftX)</source>
<translation>%1/confirmado (verificado vía SwiftX)</translation>
</message>
<message>
<source>%1 confirmations (verified via SwiftX)</source>
<translation>%1 confirmaciones (verificado vía SwiftX)</translation>
</message>
<message>
<source>%1/offline (SwiftX verification in progress - %2 of %3 signatures)</source>
<translation>%1/offline (verificación SwiftX en marcha - %2 de %3 firmas)</translation>
</message>
<message>
<source>%1/confirmed (SwiftX verification in progress - %2 of %3 signatures )</source>
<translation>%1/confirmado (verificación SwiftX en marcha - %2 de %3 firmas)</translation>
</message>
<message>
<source>%1 confirmations (SwiftX verification in progress - %2 of %3 signatures)</source>
<translation>%1 confirmaciones (verificación SwiftX en marcha - %2 de %3 firmas)</translation>
</message>
<message>
<source>%1/offline (SwiftX verification failed)</source>
<translation>%1/offline (falló la verificación SwiftX)</translation>
</message>
<message>
<source>%1/confirmed (SwiftX verification failed)</source>
<translation>%1/confirmado (falló la verificación SwiftX)</translation>
</message>
<message>
<source>Status</source>
<translation>Estado</translation>
</message>
<message>
<source>, has not been successfully broadcast yet</source>
<translation>, no ha sido correctamente transmitida todavía</translation>
</message>
<message numerus="yes">
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, retransmitida a través de %n nodo</numerusform><numerusform>, retransmitida a través de %n nodos</numerusform></translation>
</message>
<message>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<source>Source</source>
<translation>Fuente</translation>
</message>
<message>
<source>Generated</source>
<translation>Generado</translation>
</message>
<message>
<source>From</source>
<translation>De</translation>
</message>
<message>
<source>unknown</source>
<translation>desconocido</translation>
</message>
<message>
<source>To</source>
<translation>A</translation>
</message>
<message>
<source>own address</source>
<translation>dirección propia</translation>
</message>
<message>
<source>watch-only</source>
<translation>sólo-lectura</translation>
</message>
<message>
<source>label</source>
<translation>etiqueta</translation>
</message>
<message>
<source>Credit</source>
<translation>Crédito</translation>
</message>
<message numerus="yes">
<source>matures in %n more block(s)</source>
<translation><numerusform>madura en %n bloque más</numerusform><numerusform>madura en %n bloques más</numerusform></translation>
</message>
<message>
<source>not accepted</source>
<translation>no aceptado</translation>
</message>
<message>
<source>Debit</source>
<translation>Débito</translation>
</message>
<message>
<source>Total debit</source>
<translation>Débito total</translation>
</message>
<message>
<source>Total credit</source>
<translation>Crédito total</translation>
</message>
<message>
<source>Transaction fee</source>
<translation>Comisión de transacción</translation>
</message>
<message>
<source>Net amount</source>
<translation>Cantidad neta</translation>
</message>
<message>
<source>Message</source>
<translation>Mensaje</translation>
</message>
<message>
<source>Comment</source>
<translation>Comentario</translation>
</message>
<message>
<source>Transaction ID</source>
<translation>ID de la transacción</translation>
</message>
<message>
<source>Output index</source>
<translation>Índice de salida</translation>
</message>
<message>
<source>Merchant</source>
<translation>Comerciante</translation>
</message>
<message>
<source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Las monedas generadas deben madurar durante %1 bloques antes de poder gastarse. Cuando generó este bloque, se retransmitió a la red para añadirse a la cadena de bloques. Si no consigue entrar en la cadena, su estado cambiará a "no aceptado" y no se podrá gastar. Esto puede ocurrir ocasionalmente si otro nodo genera un bloque a pocos segundos del suyo.</translation>
</message>
<message>
<source>Debug information</source>
<translation>Información de depuración</translation>
</message>
<message>
<source>Transaction</source>
<translation>Transacción</translation>
</message>
<message>
<source>Inputs</source>
<translation>Entradas</translation>
</message>
<message>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<source>true</source>
<translation>verdadero</translation>
</message>
<message>
<source>false</source>
<translation>falso</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<source>Transaction details</source>
<translation>Detalles de transacción</translation>
</message>
<message>
<source>This pane shows a detailed description of the transaction</source>
<translation>Este panel muestra una descripción detallada de la transacción</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message numerus="yes">
<source>Open for %n more block(s)</source>
<translation><numerusform>Abierto para %n bloque más</numerusform><numerusform>Abierto para %n bloques más</numerusform></translation>
</message>
<message>
<source>Open until %1</source>
<translation>Abierto hasta %1</translation>
</message>
<message>
<source>Offline</source>
<translation>Desconectado</translation>
</message>
<message>
<source>Unconfirmed</source>
<translation>Sin confirmar</translation>
</message>
<message>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation>Confirmando (%1 de %2 confirmaciones recomendadas)</translation>
</message>
<message>
<source>Confirmed (%1 confirmations)</source>
<translation>Confirmado (%1 confirmaciones)</translation>
</message>
<message>
<source>Conflicted</source>
<translation>Huérfano</translation>
</message>
<message>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation>Prematuro (%1 confirmaciones, estará disponible después de %2)</translation>
</message>
<message>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>¡Este bloque no fue recibido por los otros nodos y probablemente no será aceptado!</translation>
</message>
<message>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<source>Masternode Reward</source>
<translation>Recompensa de Nodo Maestro</translation>
</message>
<message>
<source>Received from</source>
<translation>Recibido desde</translation>
</message>
<message>
<source>Received via Obfuscation</source>
<translation>Recibido por medio de Ofuscación</translation>
</message>
<message>
<source>GTR Stake</source>
<translation>Stake GTR</translation>
</message>
<message>
<source>zGTR Stake</source>
<translation>Stake zGTR</translation>
</message>
<message>
<source>Obfuscation Denominate</source>
<translation>Ofuscación Denominada</translation>
</message>
<message>
<source>Obfuscation Collateral Payment</source>
<translation>Pago Colateral de Ofuscación</translation>
</message>
<message>
<source>Obfuscation Make Collateral Inputs</source>
<translation>La Ofuscación Crea Entradas Colaterales</translation>
</message>
<message>
<source>Obfuscation Create Denominations</source>
<translation>Ofuscación Crea Denominaciones</translation>
</message>
<message>
<source>Converted GTR to zGTR</source>
<translation>GTR convertidos a zGTR</translation>
</message>
<message>
<source>Spent zGTR</source>
<translation>zGTR gastados</translation>
</message>
<message>
<source>Received GTR from zGTR</source>
<translation>GTR recibidos desde zGTR </translation>
</message>
<message>
<source>Minted Change as zGTR from zGTR Spend</source>
<translation>Cambio acuñado como zGTR al gastar zGTR</translation>
</message>
<message>
<source>Converted zGTR to GTR</source>
<translation>zGTR convertidos a GTR</translation>
</message>
<message>
<source>Anonymous (zGTR Transaction)</source>
<translation>Anónimo (Transacción zGTR)</translation>
</message>
<message>
<source>Anonymous (zGTR Stake)</source>
<translation>Anónimo (Stake zGTR)</translation>
</message>
<message>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<source>Orphan Block - Generated but not accepted. This does not impact your holdings.</source>
<translation>Bloque Huérfano - Generado pero no aceptado. Esto no afecta a su saldo.</translation>
</message>
<message>
<source>Payment to yourself</source>
<translation>Pago a usted mismo</translation>
</message>
<message>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<source>Obfuscated</source>
<translation>Ofuscado</translation>
</message>
<message>
<source>watch-only</source>
<translation>sólo-lectura</translation>
</message>
<message>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Estado de la transacción. Pase el ratón sobre este campo para mostrar el número de confirmaciones.</translation>
</message>
<message>
<source>Date and time that the transaction was received.</source>
<translation>Fecha y hora a la que se recibió la transacción.</translation>
</message>
<message>
<source>Type of transaction.</source>
<translation>Tipo de transacción.</translation>
</message>
<message>
<source>Whether or not a watch-only address is involved in this transaction.</source>
<translation>Indica si hay o no una dirección de sólo lectura involucrada en esta transacción.</translation>
</message>
<message>
<source>Destination address of transaction.</source>
<translation>Dirección de destino de la transacción.</translation>
</message>
<message>
<source>Amount removed from or added to balance.</source>
<translation>Cantidad retirada o añadida al saldo.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<source>All</source>
<translation>Todos</translation>
</message>
<message>
<source>Today</source>
<translation>Hoy</translation>
</message>
<message>
<source>This week</source>
<translation>Esta semana</translation>
</message>
<message>
<source>This month</source>
<translation>Este mes</translation>
</message>
<message>
<source>Last month</source>
<translation>Último mes</translation>
</message>
<message>
<source>This year</source>
<translation>Este año</translation>
</message>
<message>
<source>Range...</source>
<translation>Intervalo...</translation>
</message>
<message>
<source>Most Common</source>
<translation>Más Común</translation>
</message>
<message>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<source>To yourself</source>
<translation>A usted mismo</translation>
</message>
<message>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<source>Minted</source>
<translation>Creación de moneda</translation>
</message>
<message>
<source>Masternode Reward</source>
<translation>Recompensa de Nodo Maestro</translation>
</message>
<message>
<source>Zerocoin Mint</source>
<translation>Creación de moneda Zerocoin</translation>
</message>
<message>
<source>Zerocoin Spend</source>
<translation>Gasto de Zerocoin</translation>
</message>
<message>
<source>Zerocoin Spend to Self</source>
<translation>Gasto de Zerocoin a usted mismo</translation>
</message>
<message>
<source>Other</source>
<translation>Otro</translation>
</message>
<message>
<source>Enter address or label to search</source>
<translation>Introduzca dirección o etiqueta para buscar</translation>
</message>
<message>
<source>Min amount</source>
<translation>Cantidad mínima</translation>
</message>
<message>
<source>Copy address</source>
<translation>Copiar dirección</translation>
</message>
<message>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<source>Copy amount</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<source>Copy transaction ID</source>
<translation>Copiar código de transacción</translation>
</message>
<message>
<source>Edit label</source>
<translation>Editar etiqueta</translation>
</message>
<message>
<source>Show transaction details</source>
<translation>Mostrar detalles de transacción</translation>
</message>
<message>
<source>Export Transaction History</source>
<translation>Exportar Historial de Transacciones</translation>
</message>
<message>
<source>Comma separated file (*.csv)</source>
<translation>Archivo separado por comas (*.csv)</translation>
</message>
<message>
<source>Confirmed</source>
<translation>Confirmado</translation>
</message>
<message>
<source>Watch-only</source>
<translation>Sólo-lectura</translation>
</message>
<message>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<source>Exporting Failed</source>
<translation>Exportación fallida</translation>
</message>
<message>
<source>There was an error trying to save the transaction history to %1.</source>
<translation>Hubo un error al intentar guardar el historial de transacciones en %1.</translation>
</message>
<message>
<source>Exporting Successful</source>
<translation>Exportación Correcta</translation>
</message>
<message>
<source>Received GTR from zGTR</source>
<translation>GTR recibidos desde zGTR</translation>
</message>
<message>
<source>Zerocoin Spend, Change in zGTR</source>
<translation>Gastar Zerocoin, Cambio en zGTR</translation>
</message>
<message>
<source>The transaction history was successfully saved to %1.</source>
<translation>El historial de transacciones se guardó correctamente en %1.</translation>
</message>
<message>
<source>Range:</source>
<translation>Rango:</translation>
</message>
<message>
<source>to</source>
<translation>a</translation>
</message>
</context>
<context>
<name>UnitDisplayStatusBarControl</name>
<message>
<source>Unit to show amounts in. Click to select another unit.</source>
<translation>Unidad en la que mostrar las cantidades. Haga clic para seleccionar otra unidad.</translation>
</message>
</context>
<context>
<name>WalletFrame</name>
<message>
<source>No wallet has been loaded.</source>
<translation>No se ha cargado ningún monedero.</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<source>Send Coins</source>
<translation>Enviar Monedas</translation>
</message>
<message>
<source>SwiftX doesn't support sending values that high yet. Transactions are currently limited to %1 GTR.</source>
<translation>SwiftX no soporta el envío de importes tan altos todavía. Las transacciones están actualmente limitadas a %1 GTR.</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<source>HISTORY</source>
<translation>HISTORIAL</translation>
</message>
<message>
<source>Export</source>
<translation>Exportar</translation>
</message>
<message>
<source>Export the data in the current tab to a file</source>
<translation>Exportar los datos de la pestaña actual a un archivo</translation>
</message>
<message>
<source>Selected amount:</source>
<translation>Cantidad seleccionada:</translation>
</message>
<message>
<source>Backup Wallet</source>
<translation>Copia del Monedero</translation>
</message>
<message>
<source>Wallet Data (*.dat)</source>
<translation>Datos del Monedero (*.dat)</translation>
</message>
</context>
<context>
<name>ZGTRControlDialog</name>
<message>
<source>Select zGTR to Spend</source>
<translation>Seleccione zGTR para Gastar</translation>
</message>
<message>
<source>Quantity</source>
<translation>Cantidad</translation>
</message>
<message>
<source>0</source>
<translation>0</translation>
</message>
<message>
<source>zGTR</source>
<translation>zGTR</translation>
</message>
<message>
<source>Select/Deselect All</source>
<translation>Seleccionar/Deseleccionar Todos</translation>
</message>
<message>
<source>Is Spendable</source>
<translation>Es Gastable</translation>
</message>
</context>
<context>
<name>GTR-core</name>
<message>
<source>(1 = keep tx meta data e.g. account owner and payment request information, 2 = drop tx meta data)</source>
<translation>(1 = mantener los metadatos de la transacción, p. ej. el dueño de la cuenta y la información de la solicitud de pago, 2 = descartar los metadatos de la transacción)</translation>
</message>
<message>
<source>Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times</source>
<translation>Permitir conexiones JSON-RPC desde la fuente especificada. Válidos para <ip> son una única IP (ej: 1.2.3.4), una red/máscara de red (ej: 1.2.3.4/255.255.255.0) o una red/CIDR (ej: 1.2.3.4/24). Esta opción puede especificarse múltiples veces</translation>
</message>
<message>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Vincularse a la dirección indicada y escuchar siempre en ella. Use la notación [host]:puerto para IPv6</translation>
</message>
<message>
<source>Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6</source>
<translation>Escuchar únicamente a la dirección IP indicada y a nodos de la lista blanca. Use la notación [host]:puerto para IPv6</translation>
</message>
<message>
<source>Bind to given address to listen for JSON-RPC connections. Use [host]:port notation for IPv6. This option can be specified multiple times (default: bind to all interfaces)</source>
<translation>Escuchar únicamente a la tarjeta de red indicada para conexiones JSON-RPC. Use la notación [host]:puerto para IPv6. Esta opción puede ser especificada varias veces (por defecto: escuchar en todas las tarjetas de red)</translation>
</message>
<message>
<source>Calculated accumulator checkpoint is not what is recorded by block index</source>
<translation>El punto de chequeo del acumulador que hemos calculado no coincide con lo guardado en el índice de bloques</translation>
</message>
<message>
<source>Cannot obtain a lock on data directory %s. GTR Core is probably already running.</source>
<translation>No se puede obtener un bloqueo sobre el directorio de datos %s. GTR Core probablemente ya está en ejecución.</translation>
</message>
<message>
<source>Change automatic finalized budget voting behavior. mode=auto: Vote for only exact finalized budget match to my generated budget. (string, default: auto)</source>
<translation>Cambiar el comportamiento automático de votación de presupuesto final. modo=auto: Votar sólo por coincidencia exacta de un presupuesto finalizado con el generado por mí. (cadena, por defecto: auto)</translation>
</message>
<message>
<source>Continuously rate-limit free transactions to <n>*1000 bytes per minute (default:%u)</source>
<translation>Limitar continuamente las transacciones gratuitas a <n>*1000 bytes por minuto (predeterminado: %u)</translation>
</message>
<message>
<source>Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)</source>
<translation>Crear los nuevos archivos con los permisos predeterminados del sistema, en vez de umask 077 (solamente efectivo con la funcionalidad del monedero deshabilitada)</translation>
</message>
<message>
<source>Delete all wallet transactions and only recover those parts of the blockchain through -rescan on startup</source>
<translation>Borrar todas las transacciones del monedero y recuperar solamente esas partes de la cadena de bloques mediante -rescan al inicio</translation>
</message>
<message>
<source>Delete all zerocoin spends and mints that have been recorded to the blockchain database and reindex them (0-1, default: %u)</source>
<translation>Eliminar todos los gastos y acuñaciones de Zerocoin que se hayan registrado en la base de datos de la cadena de bloques y volver a indexarlos (0-1, predeterminado: %u)</translation>
</message>
<message>
<source>Distributed under the MIT software license, see the accompanying file COPYING or <http://www.opensource.org/licenses/mit-license.php>.</source>
<translation>Distribuido bajo la licencia de software MIT; vea el archivo adjunto COPYING o <http://www.opensource.org/licenses/mit-license.php>.</translation>
</message>
<message>
<source>Enable automatic wallet backups triggered after each zGTR minting (0-1, default: %u)</source>
<translation>Habilite copias de seguridad automáticas del monedero que se activan después de cada acuñación zGTR (0-1, predeterminado: %u)</translation>
</message>
<message>
<source>Enable or disable staking functionality for GTR inputs (0-1, default: %u)</source>
<translation>Habilitar o deshabilitar la funcionalidad de staking para las entradas GTR (0-1, predeterminado: %u)</translation>
</message>
<message>
<source>Enable or disable staking functionality for zGTR inputs (0-1, default: %u)</source>
<translation>Habilitar o deshabilitar la funcionalidad de staking para las entradas zGTR (0-1, predeterminado: %u)</translation>
</message>
<message>
<source>Enable spork administration functionality with the appropriate private key.</source>
<translation>Activar la función de administración de sporks con la llave privada apropiada.</translation>
</message>
<message>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly.</source>
<translation>Entrar en el modo de prueba de regresión, que usa una cadena especial en la cual los bloques pueden resolverse instantáneamente.</translation>
</message>
<message>
<source>Error: Listening for incoming connections failed (listen returned error %s)</source>
<translation>Error: Falló la escucha de conexiones entrantes (la escucha retornó el error %s)</translation>
</message>
<message>
<source>Error: The transaction is larger than the maximum allowed transaction size!</source>
<translation>Error: ¡La transacción es más grande que el tamaño máximo de transacción permitido!</translation>
</message>
<message>
<source>Error: Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported.</source>
<translation>Error: Se encontró el argumento no soportado -socks. Ya no es posible establecer la versión de SOCKS; sólo se soportan proxies SOCKS5.</translation>
</message>
<message>
<source>Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)</source>
<translation>Ejecutar un comando cuando se reciba una alerta relevante o veamos una bifurcación realmente larga (%s en cmd se reemplaza por el mensaje)</translation>
</message>
<message>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Ejecutar un comando cuando una transacción del monedero cambie (%s en cmd se reemplaza por el TxID)</translation>
</message>
<message>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Ejecutar comando cuando el mejor bloque cambie (%s en cmd es reemplazado por el block hash)</translation>
</message>
<message>
<source>Fees (in GTR/Kb) smaller than this are considered zero fee for relaying (default: %s)</source>
<translation>Las comisiones (en GTR/Kb) menores que esta se consideran comisión cero para la retransmisión (predeterminado: %s)</translation>
</message>
<message>
<source>Fees (in GTR/Kb) smaller than this are considered zero fee for transaction creation (default: %s)</source>
<translation>Las comisiones (en GTR/Kb) menores que esta se consideran comisión cero para la creación de transacciones (predeterminado: %s)</translation>
</message>
<message>
<source>Flush database activity from memory pool to disk log every <n> megabytes (default: %u)</source>
<translation>Trasladar la actividad de la base de datos del pool de memoria al registro en disco cada <n> megabytes (predeterminado: %u)</translation>
</message>
<message>
<source>Found unconfirmed denominated outputs, will wait till they confirm to continue.</source>
<translation>Se encontraron salidas denominadas sin confirmar; se esperará hasta que se confirmen para continuar.</translation>
</message>
<message>
<source>If paytxfee is not set, include enough fee so transactions begin confirmation on average within n blocks (default: %u)</source>
<translation>Si paytxfee no está definido, incluir comisión suficiente para que las transacciones empiecen a confirmarse en promedio dentro de n bloques (predeterminado: %u)</translation>
</message>
<message>
<source>In this mode -genproclimit controls how many blocks are generated immediately.</source>
<translation>En este modo -genproclimit controla cuántos bloques se generan inmediatamente.</translation>
</message>
<message>
<source>Insufficient or insufficient confirmed funds, you might need to wait a few minutes and try again.</source>
<translation>Fondos insuficientes o fondos confirmados insuficientes, es posible que tenga que esperar unos minutos y volver a intentarlo.</translation>
</message>
<message>
<source>Invalid amount for -maxtxfee=<amount>: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)</source>
<translation>Importe inválido para -maxtxfee=<amount>: '%s' (debe ser al menos la comisión minrelay de %s para prevenir transacciones atascadas)</translation>
</message>
<message>
<source>Keep the specified amount available for spending at all times (default: 0)</source>
<translation>Mantener la cantidad especificada disponible para gastar en todo momento (por defecto: 0)</translation>
</message>
<message>
<source>Log transaction priority and fee per kB when mining blocks (default: %u)</source>
<translation>Registrar la prioridad de la transacción y la comisión por kB al minar bloques (predeterminado: %u)</translation>
</message>
<message>
<source>Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)</source>
<translation>Mantener un índice completo de transacciones, utilizado por la llamada RPC getrawtransaction (predeterminado: %u)</translation>
</message>
<message>
<source>Maximum size of data in data carrier transactions we relay and mine (default: %u)</source>
<translation>Tamaño máximo de los datos en transacciones portadoras de datos que retransmitimos y minamos (predeterminado: %u)</translation>
</message>
<message>
<source>Maximum total fees to use in a single wallet transaction, setting too low may abort large transactions (default: %s)</source>
<translation>Comisión máxima total a usar en una única transacción del monedero; definirla muy baja puede abortar transacciones grandes (predeterminado: %s)</translation>
</message>
<message>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: %u)</source>
<translation>Número de segundos durante los que se impide la reconexión de pares con mal comportamiento (predeterminado: %u)</translation>
</message>
<message>
<source>Obfuscation uses exact denominated amounts to send funds, you might simply need to anonymize some more coins.</source>
<translation>La ofuscación utiliza importes en denominaciones exactas para enviar fondos; puede que simplemente necesite anonimizar algunas monedas más.</translation>
</message>
<message>
<source>Output debugging information (default: %u, supplying <category> is optional)</source>
<translation>Salida de información de depuración (predeterminado: %u, proveer <category> es opcional)</translation>
</message>
<message>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation>Consultar direcciones de pares vía búsqueda DNS si quedan pocas direcciones (predeterminado: 1 a menos que se use -connect)</translation>
</message>
<message>
<source>Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)</source>
<translation>Usar credenciales aleatorias para cada conexión proxy. Esto habilita el aislamiento de flujos de datos de Tor (por defecto: %u)</translation>
</message>
<message>
<source>Require high priority for relaying free or low-fee transactions (default:%u)</source>
<translation>Requerir alta prioridad para retransmitir transacciones gratuitas o de baja comisión (predeterminado: %u)</translation>
</message>
<message>
<source>Send trace/debug info to console instead of debug.log file (default: %u)</source>
<translation>Enviar información de traza/depuración a la consola en lugar del archivo debug.log (predeterminado: %u)</translation>
</message>
<message>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: %d)</source>
<translation>Define el tamaño máximo de las transacciones de alta prioridad/baja comisión en bytes (predeterminado: %d)</translation>
</message>
<message>
<source>Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)</source>
<translation>Define el número de hilos de verificación de scripts (%u a %d, 0 = auto, <0 = dejar esa cantidad de núcleos libres, predeterminado: %d)</translation>
</message>
<message>
<source>Set the number of threads for coin generation if enabled (-1 = all cores, default: %d)</source>
<translation>Define el número de hilos para la generación de moneda si está habilitada (-1 = todos los núcleos, predeterminado: %d)</translation>
</message>
<message>
<source>Show N confirmations for a successfully locked transaction (0-9999, default: %u)</source>
<translation>Mostrar N confirmaciones para una transacción bloqueada exitosamente (0-9999, predeterminado: %u)</translation>
</message>
<message>
<source>Support filtering of blocks and transaction with bloom filters (default: %u)</source>
<translation>Soportar filtrado de bloques y transacciones con filtros bloom (por defecto: %u)</translation>
</message>
<message>
<source>This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit <https://www.openssl.org/> and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.</source>
<translation>Este producto incluye software desarrollado por el Proyecto OpenSSL para uso en OpenSSL Toolkit <https://www.openssl.org/> y software de cifrado escrito por Eric Young y software de UPnP escrito por Thomas Bernard.</translation>
</message>
<message>
<source>Unable to bind to %s on this computer. GTR Core is probably already running.</source>
<translation>Imposible vincularse a %s en esta computadora. GTR Core probablemente ya está en ejecución.</translation>
</message>
<message>
<source>Unable to locate enough Obfuscation denominated funds for this transaction.</source>
<translation>Imposible localizar suficientes fondos denominados de Ofuscación para esta transacción.</translation>
</message>
<message>
<source>Unable to locate enough Obfuscation non-denominated funds for this transaction that are not equal 10000 GTR.</source>
<translation>Imposible localizar suficientes fondos no denominados de Ofuscación para esta transacción que no sean iguales a 10000 GTR.</translation>
</message>
<message>
<source>Unable to locate enough funds for this transaction that are not equal 10000 GTR.</source>
<translation>Imposible localizar suficientes fondos para esta transacción que no sean iguales a 10000 GTR.</translation>
</message>
<message>
<source>Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: %s)</source>
<translation>Utilizar un proxy SOCKS5 aparte para alcanzar pares vía los servicios ocultos de Tor (predeterminado: %s)</translation>
</message>
<message>
<source>Warning: -maxtxfee is set very high! Fees this large could be paid on a single transaction.</source>
<translation>Advertencia: ¡-maxtxfee está configurado muy alto! Comisiones así de altas podrían pagarse en una sola transacción.</translation>
</message>
<message>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Advertencia: ¡-paytxfee está configurado muy alto! Esta es la comisión de transacción que pagará si envía una transacción.</translation>
</message>
<message>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong GTR Core will not work properly.</source>
<translation>Advertencia: ¡Por favor, verifique que la fecha y hora de su computadora sean correctas! Si su reloj está mal ajustado, GTR Core no funcionará adecuadamente.</translation>
</message>
<message>
<source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source>
<translation>Advertencia: ¡La red no parece estar totalmente de acuerdo! Algunos mineros parecen estar experimentando problemas.</translation>
</message>
<message>
<source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Advertencia: ¡No parecemos estar totalmente de acuerdo con nuestros pares! Puede que usted necesite actualizar, o que otros nodos necesiten actualizar.</translation>
</message>
<message>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Advertencia: ¡error al leer wallet.dat! Todas las claves se leyeron correctamente, pero los datos de transacciones o las entradas del libro de direcciones podrían faltar o ser incorrectos.</translation>
</message>
<message>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Advertencia: ¡wallet.dat está dañado, datos rescatados! El wallet.dat original se guardó como wallet.{timestamp}.bak en %s; si su saldo o sus transacciones son incorrectos, debería restaurar desde una copia de seguridad.</translation>
</message>
<message>
<source>Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.</source>
<translation>Añadir a la lista blanca los pares que se conecten desde la máscara de red o dirección IP indicada. Puede especificarse múltiples veces.</translation>
</message>
<message>
<source>Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway</source>
<translation>Los pares de la lista blanca no pueden ser baneados por DoS y sus transacciones siempre se retransmiten, aún si ya están en la mempool; útil p. ej. para un gateway</translation>
</message>
<message>
<source>You must specify a masternodeprivkey in the configuration. Please see documentation for help.</source>
<translation>Debe especificar un masternodeprivkey en la configuración. Por favor, consulte la documentación para obtener ayuda.</translation>
</message>
<message>
<source>(9229 could be used only on mainnet)</source>
<translation>(9229 puede ser utilizado solo en mainnet)</translation>
</message>
<message>
<source>(default: %s)</source>
<translation>(predeterminado: %s)</translation>
</message>
<message>
<source>(default: 1)</source>
<translation>(predeterminado: 1)</translation>
</message>
<message>
<source>(must be 9229 for mainnet)</source>
<translation>(debe ser 9229 para un mainnet)</translation>
</message>
<message>
<source>Accept command line and JSON-RPC commands</source>
<translation>Aceptar comandos desde la línea de comandos y comandos JSON-RPC</translation>
</message>
<message>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Aceptar conexiones desde el exterior (predeterminado: 1 si no hay -proxy ni -connect)</translation>
</message>
<message>
<source>Accept public REST requests (default: %u)</source>
<translation>Aceptar peticiones públicas REST (predeterminado: %u)</translation>
</message>
<message>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Agregar un nodo al que conectarse e intentar mantener abierta la conexión</translation>
</message>
<message>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Permitir búsquedas DNS para -addnode, -seednode y -connect</translation>
</message>
<message>
<source>Already have that input.</source>
<translation>Ya tiene esa entrada.</translation>
</message>
<message>
<source>Always query for peer addresses via DNS lookup (default: %u)</source>
<translation>Siempre consultar por direcciones de pares vía búsqueda DNS (predeterminado: %u)</translation>
</message>
<message>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Intentar recuperar claves privadas desde un wallet.dat dañado</translation>
</message>
<message>
<source>Automatically create Tor hidden service (default: %d)</source>
<translation>Crear automáticamente servicio Tor oculto (por defecto: %d)</translation>
</message>
<message>
<source>Block creation options:</source>
<translation>Opciones de creación de bloque:</translation>
</message>
<message>
<source>Calculating missing accumulators...</source>
<translation>Calculando acumuladores pendientes...</translation>
</message>
<message>
<source>Can't denominate: no compatible inputs left.</source>
<translation>No se puede denominar: no quedan entradas compatibles.</translation>
</message>
<message>
<source>Can't find random Masternode.</source>
<translation>No se puede encontrar un Masternode al azar.</translation>
</message>
<message>
<source>Can't mix while sync in progress.</source>
<translation>No se puede mezclar mientras la sincronización está en progreso.</translation>
</message>
<message>
<source>Cannot downgrade wallet</source>
<translation>No se puede volver a una versión anterior del monedero</translation>
</message>
<message>
<source>Cannot resolve -bind address: '%s'</source>
<translation>No se puede resolver la dirección -bind: '%s'</translation>
</message>
<message>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>No se puede resolver la dirección -externalip: '%s'</translation>
</message>
<message>
<source>Cannot resolve -whitebind address: '%s'</source>
<translation>No se puede resolver la dirección -whitebind: '%s'</translation>
</message>
<message>
<source>Cannot write default address</source>
<translation>No se puede escribir la dirección predeterminada</translation>
</message>
<message>
<source>Collateral not valid.</source>
<translation>Colateral no válido.</translation>
</message>
<message>
<source>Connect only to the specified node(s)</source>
<translation>Conectar sólo al nodo o nodos especificados</translation>
</message>
<message>
<source>Connect through SOCKS5 proxy</source>
<translation>Conectar a través de proxy SOCKS5</translation>
</message>
<message>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Conectar a un nodo para obtener direcciones de pares, y desconectar</translation>
</message>
<message>
<source>Connection options:</source>
<translation>Opciones de conexión:</translation>
</message>
<message>
<source>Copyright (C) 2009-%i The Bitcoin Core Developers</source>
<translation>Copyright (C) 2009-%i The Bitcoin Core Developers</translation>
</message>
<message>
<source>Copyright (C) 2014-%i The Dash Core Developers</source>
<translation>Copyright (C) 2014-%i The Dash Core Developers</translation>
</message>
<message>
<source>Copyright (C) 2015-%i The GTR Core Developers</source>
<translation>Copyright (C) 2015-%i The GTR Core Developers</translation>
</message>
<message>
<source>Corrupted block database detected</source>
<translation>Se detectó una base de datos de bloques dañada</translation>
</message>
<message>
<source>Could not parse masternode.conf</source>
<translation>No se pudo analizar el contenido de masternode.conf</translation>
</message>
<message>
<source>Debugging/Testing options:</source>
<translation>Opciones de Depuración/Pruebas:</translation>
</message>
<message>
<source>Delete blockchain folders and resync from scratch</source>
<translation>Eliminar las carpetas de la cadena de bloques y resincronizar desde cero</translation>
</message>
<message>
<source>Disable OS notifications for incoming transactions (default: %u)</source>
<translation>Desactivar notificaciones del sistema para transacciones entrantes (por defecto: %u)</translation>
</message>
<message>
<source>Disable safemode, override a real safe mode event (default: %u)</source>
<translation>Desactiva modo seguro, invalida un evento modo seguro real (predeterminado: %u)</translation>
</message>
<message>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Descubrir la dirección IP propia (predeterminado: 1 cuando se está a la escucha y sin opción -externalip)</translation>
</message>
<message>
<source>Do not load the wallet and disable wallet RPC calls</source>
<translation>No cargar el monedero y desactivar las llamadas RPC del monedero</translation>
</message>
<message>
<source>Do you want to rebuild the block database now?</source>
<translation>¿Quieres reconstruir la base de datos de bloques ahora?</translation>
</message>
<message>
<source>Done loading</source>
<translation>Carga completa</translation>
</message>
<message>
<source>Enable automatic Zerocoin minting (0-1, default: %u)</source>
<translation>Habilitar creación automática de Zerocoin (0-1, por defecto: %u)</translation>
</message>
<message>
<source>Enable publish hash transaction (locked via SwiftX) in <address></source>
<translation>Activar inclusión del hash de la transacción (fijada mediante SwiftX) en <address></translation>
</message>
<message>
<source>Enable publish raw transaction (locked via SwiftX) in <address></source>
<translation>Activar inclusión de la transacción en bruto (fijada mediante SwiftX) en <address></translation>
</message>
<message>
<source>Enable the client to act as a masternode (0-1, default: %u)</source>
<translation>Habilitar al cliente para actuar como un nodo maestro (0-1, predeterminado: %u)</translation>
</message>
<message>
<source>Entries are full.</source>
<translation>Las entradas están llenas.</translation>
</message>
<message>
<source>Error connecting to Masternode.</source>
<translation>Error al conectar al Nodo Maestro.</translation>
</message>
<message>
<source>Error initializing block database</source>
<translation>Error al inicializar base de datos de bloques</translation>
</message>
<message>
<source>Error initializing wallet database environment %s!</source>
<translation>¡Error al inicializar el entorno %s de la base de datos del monedero!</translation>
</message>
<message>
<source>Error loading block database</source>
<translation>Error al cargar base de datos de bloques</translation>
</message>
<message>
<source>Error loading wallet.dat</source>
<translation>Error al cargar wallet.dat</translation>
</message>
<message>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Error al cargar wallet.dat: Monedero dañado</translation>
</message>
<message>
<source>Error loading wallet.dat: Wallet requires newer version of GTR Core</source>
<translation>Error al cargar wallet.dat: El monedero requiere una versión más reciente de GTR Core</translation>
</message>
<message>
<source>Error opening block database</source>
<translation>Error al abrir la base de datos de bloques</translation>
</message>
<message>
<source>Error reading from database, shutting down.</source>
<translation>Error al leer desde la base de datos, apagando.</translation>
</message>
<message>
<source>Error recovering public key.</source>
<translation>Error al recuperar clave pública.</translation>
</message>
<message>
<source>Error</source>
<translation>Error</translation>
</message>
<message>
<source>Error: A fatal internal error occured, see debug.log for details</source>
<translation>Error: Ha ocurrido un error interno fatal, ver debug.log para más detalles</translation>
</message>
<message>
<source>Error: Can't select current denominated inputs</source>
<translation>Error: No se pudo seleccionar las entradas denominadas actuales</translation>
</message>
<message>
<source>Error: Disk space is low!</source>
<translation>Error: ¡Queda poco espacio en disco!</translation>
</message>
<message>
<source>Error: Unsupported argument -tor found, use -onion.</source>
<translation>Error: Se encontró argumento no soportado -tor, utilizar -onion.</translation>
</message>
<message>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>Error: ¡Monedero bloqueado, imposible crear transacción!</translation>
</message>
<message>
<source>Error: You already have pending entries in the Obfuscation pool</source>
<translation>Error: Ya tienes entradas pendientes en el pool de Ofuscación</translation>
</message>
<message>
<source>Failed to calculate accumulator checkpoint</source>
<translation>Error al calcular el punto de control del acumulador</translation>
</message>
<message>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>No se pudo escuchar en ningún puerto. Use -listen=0 si desea esto.</translation>
</message>
<message>
<source>Failed to read block</source>
<translation>Error al leer el bloque</translation>
</message>
<message>
<source>Fee (in GTR/kB) to add to transactions you send (default: %s)</source>
<translation>Comisión (en GTR/kB) para agregar a la transacción que envías (predeterminado: %s)</translation>
</message>
<message>
<source>Finalizing transaction.</source>
<translation>Finalizando transacción.</translation>
</message>
<message>
<source>Force safe mode (default: %u)</source>
<translation>Forzar modo seguro (predeterminado: %u)</translation>
</message>
<message>
<source>Found enough users, signing ( waiting %s )</source>
<translation>Se han encontrado suficientes usuarios, firmando ( esperando %s )</translation>
</message>
<message>
<source>Found enough users, signing ...</source>
<translation>Se han encontrado suficientes usuarios, firmando ...</translation>
</message>
<message>
<source>Generate coins (default: %u)</source>
<translation>Generar monedas (predeterminado: %u)</translation>
</message>
<message>
<source>How many blocks to check at startup (default: %u, 0 = all)</source>
<translation>Cuántos bloques comprobar al iniciar (predeterminado: %u, 0 = todos)</translation>
</message>
<message>
<source>If <category> is not supplied, output all debugging information.</source>
<translation>Si no se indica <category>, mostrar toda la información de depuración.</translation>
</message>
<message>
<source>Importing...</source>
<translation>Importando...</translation>
</message>
<message>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Importar bloques desde archivo externo blk000??.dat</translation>
</message>
<message>
<source>Include IP addresses in debug output (default: %u)</source>
<translation>Incluir direcciones IP en salida de depuración (predeterminado: %u)</translation>
</message>
<message>
<source>Incompatible mode.</source>
<translation>Modo incompatible.</translation>
</message>
<message>
<source>Incompatible version.</source>
<translation>Versión incompatible.</translation>
</message>
<message>
<source>Incorrect or no genesis block found. Wrong datadir for network?</source>
<translation>Bloque génesis incorrecto o no encontrado. ¿datadir equivocado para la red?</translation>
</message>
<message>
<source>Information</source>
<translation>Información</translation>
</message>
<message>
<source>Initialization sanity check failed. GTR Core is shutting down.</source>
<translation>La prueba de salud de inicialización ha fallado. GTR Core se cerrará.</translation>
</message>
<message>
<source>Input is not valid.</source>
<translation>La entrada no es válida.</translation>
</message>
<message>
<source>Insufficient funds</source>
<translation>Fondos insuficientes</translation>
</message>
<message>
<source>Insufficient funds.</source>
<translation>Fondos insuficientes.</translation>
</message>
<message>
<source>Invalid -onion address or hostname: '%s'</source>
<translation>Dirección o nombre de equipo -onion inválido: '%s'</translation>
</message>
<message>
<source>Invalid amount for -maxtxfee=<amount>: '%s'</source>
<translation>Importe inválido para -maxtxfee=<amount>: '%s'</translation>
</message>
<message>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation>Importe inválido para -minrelaytxfee=<amount>: '%s'</translation>
</message>
<message>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation>Importe inválido para -mintxfee=<amount>: '%s'</translation>
</message>
<message>
<source>Invalid amount for -paytxfee=<amount>: '%s' (must be at least %s)</source>
<translation>Importe inválido para -paytxfee=<amount>: '%s' (debe ser al menos %s)</translation>
</message>
<message>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Importe inválido para -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<source>Invalid amount for -reservebalance=<amount></source>
<translation>Importe inválido para -reservebalance=<amount></translation>
</message>
<message>
<source>Invalid amount</source>
<translation>Importe inválido</translation>
</message>
<message>
<source>Invalid masternodeprivkey. Please see documenation.</source>
<translation>Valor de masternodeprivkey es inválido. Por favor ver la documentación.</translation>
</message>
<message>
<source>Invalid netmask specified in -whitelist: '%s'</source>
<translation>Máscara de red inválida especificada en -whitelist: '%s'</translation>
</message>
<message>
<source>Invalid port detected in masternode.conf</source>
<translation>Se ha detectado un puerto inválido en masternode.conf</translation>
</message>
<message>
<source>Invalid private key.</source>
<translation>Clave privada inválida.</translation>
</message>
<message>
<source>Invalid script detected.</source>
<translation>Script inválido detectado.</translation>
</message>
<message>
<source>Percentage of automatically minted Zerocoin (1-100, default: %u)</source>
<translation>Porcentaje de Zerocoin creadas automáticamente (1-100, por defecto: %u)</translation>
</message>
<message>
<source>Reindex the GTR and zGTR money supply statistics</source>
<translation>Reindexar las estadísticas de suministro de dinero de GTR y zGTR</translation>
</message>
<message>
<source>Reindexing zerocoin database...</source>
<translation>Reindexando la base de datos zerocoin...</translation>
</message>
<message>
<source>Reindexing zerocoin failed</source>
<translation>La reindexación zerocoin ha fallado</translation>
</message>
<message>
<source>Selected coins value is less than payment target</source>
<translation>El valor de las monedas seleccionadas es menor que el monto a pagar</translation>
</message>
<message>
<source>SwiftX options:</source>
<translation>Opciones SwiftX:</translation>
</message>
<message>
<source>This is a pre-release test build - use at your own risk - do not use for staking or merchant applications!</source>
<translation>Esto es una versión pre-release de prueba - use bajo su propia responsabilidad - ¡No lo utilice para recompensa de participación ni aplicaciones de comercio!</translation>
</message>
<message>
<source> mints deleted
</source>
<translation> creaciones de moneda borradas
</translation>
</message>
<message>
<source> mints updated, </source>
<translation> creaciones de moneda actualizadas, </translation>
</message>
<message>
<source> unconfirmed transactions removed
</source>
<translation> transacciones sin confirmar eliminadas
</translation>
</message>
<message>
<source>Disable all GTR specific functionality (Masternodes, Zerocoin, SwiftX, Budgeting) (0-1, default: %u)</source>
<translation>Deshabilitar toda la funcionalidad específica de GTR (Masternodes, Zerocoin, SwiftX, Budgeting) (0-1, predeterminado: %u)</translation>
</message>
<message>
<source>Enable SwiftX, show confirmations for locked transactions (bool, default: %s)</source>
<translation>Activar SwiftX, mostrar confirmaciones para transacciones bloqueadas (bool, predeterminado: %s)</translation>
</message>
<message>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Error: ¡La transacción fue rechazada! Esto puede ocurrir si algunas de las monedas de su monedero ya se gastaron, por ejemplo, si usó una copia de wallet.dat y las monedas se gastaron en la copia pero no se marcaron como gastadas aquí.</translation>
</message>
<message>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>Error: ¡Esta transacción requiere una tasa de al menos %s debido a la cantidad enviada, su complejidad, o el uso de fondos recibidos recientemente!</translation>
</message>
<message>
<source>Error: Unsupported argument -checklevel found. Checklevel must be level 4.</source>
<translation>Error: Se ha especificado un argumento no soportado -checklevel. El Checklevel debe ser 4.</translation>
</message>
<message>
<source>Execute command when the best block changes and its size is over (%s in cmd is replaced by block hash, %d with the block size)</source>
<translation>Ejecutar el comando cuando cambie el mejor bloque y su tamaño supere el límite (%s en cmd se reemplaza por el hash del bloque, %d por el tamaño del bloque)</translation>
</message>
<message>
<source>Failed to find coin set amongst held coins with less than maxNumber of Spends</source>
<translation>No se pudo encontrar un conjunto de monedas, entre las monedas disponibles, con menos de maxNumber de gastos</translation>
</message>
<message>
<source>In rare cases, a spend with 7 coins exceeds our maximum allowable transaction size, please retry spend using 6 or less coins</source>
<translation>En casos excepcionales, un gasto con 7 monedas excede el tamaño máximo de transacción permitido; intente gastar usando 6 monedas o menos</translation>
</message>
<message>
<source>Preferred Denomination for automatically minted Zerocoin (1/5/10/50/100/500/1000/5000), 0 for no preference. default: %u)</source>
<translation>Denominación preferida para el acuñado automático de Zerocoin (1/5/10/50/100/500/1000/5000), 0 para ninguna preferencia. predeterminado: %u)</translation>
</message>
<message>
<source>Specify custom backup path to add a copy of any automatic zGTR backup. If set as dir, every backup generates a timestamped file. If set as file, will rewrite to that file every backup. If backuppath is set as well, 4 backups will happen</source>
<translation>Especifique la ruta de copia de seguridad personalizada para agregar una copia de cualquier copia de seguridad zGTR automática. Si se establece como directorio, cada copia de seguridad genera un archivo con marcas de tiempo. Si se establece como archivo, se reescribirá en ese archivo cada copia de seguridad. Si también se establece backuppath, se realizarán 4 copias de seguridad</translation>
</message>
<message>
<source>Specify custom backup path to add a copy of any wallet backup. If set as dir, every backup generates a timestamped file. If set as file, will rewrite to that file every backup.</source>
<translation>Especifique una ruta de copia de seguridad personalizada para agregar una copia de cualquier copia de seguridad de monedero. Si se establece como directorio, cada copia de seguridad genera un archivo con marcas de tiempo. Si se establece como archivo, se reescribirá en ese archivo cada copia de seguridad generada.</translation>
</message>
<message>
<source>SwiftX requires inputs with at least 6 confirmations, you might need to wait a few minutes and try again.</source>
<translation>SwiftX requiere entradas con al menos 6 confirmaciones; es posible que deba esperar unos minutos e intentarlo de nuevo.</translation>
</message>
<message>
<source><category> can be:</source>
<translation><category> puede ser:</translation>
</message>
<message>
<source>Attempt to force blockchain corruption recovery</source>
<translation>Intentar forzar la recuperación de la cadena de bloques corrupta</translation>
</message>
<message>
<source>CoinSpend: Accumulator witness does not verify</source>
<translation>CoinSpend: El testigo del acumulador no se pudo verificar</translation>
</message>
<message>
<source>Display the stake modifier calculations in the debug.log file.</source>
<translation>Incluir los cálculos de recompensa por participación en el fichero debug.log.</translation>
</message>
<message>
<source>Display verbose coin stake messages in the debug.log file.</source>
<translation>Incluir mensajes explícitos de recompensa por participación en el fichero debug.log.</translation>
</message>
<message>
<source>Enable publish hash block in <address></source>
<translation>Activar inclusión del hash del bloque en <address></translation>
</message>
<message>
<source>Enable publish hash transaction in <address></source>
<translation>Activar inclusión del hash de la transacción en <address></translation>
</message>
<message>
<source>Enable publish raw block in <address></source>
<translation>Activar inclusión del bloque en bruto en <address></translation>
</message>
<message>
<source>Enable publish raw transaction in <address></source>
<translation>Activar inclusión de la transacción en bruto en <address></translation>
</message>
<message>
<source>Enable staking functionality (0-1, default: %u)</source>
<translation>Activar funcionalidad de recompensa por participación (0-1, por defecto: %u)</translation>
</message>
<message>
<source>Error: A fatal internal error occurred, see debug.log for details</source>
<translation>Error: Ha ocurrido un error interno fatal, ver debug.log para más detalles</translation>
</message>
<message>
<source>Error: No valid utxo!</source>
<translation>Error: ¡No hay ningún utxo válido!</translation>
</message>
<message>
<source>Failed to create mint</source>
<translation>Error al acuñar</translation>
</message>
<message>
<source>Failed to deserialize</source>
<translation>Error al deserializar</translation>
</message>
<message>
<source>Failed to find Zerocoins in wallet.dat</source>
<translation>Error al encontrar Zerocoins en wallet.dat</translation>
</message>
<message>
<source>Failed to select a zerocoin</source>
<translation>Error al seleccionar una zerocoin</translation>
</message>
<message>
<source>Failed to wipe zerocoinDB</source>
<translation>Error al borrar zerocoinDB</translation>
</message>
<message>
<source>Failed to write coin serial number into wallet</source>
<translation>Error al escribir el número de serie de la moneda en el monedero</translation>
</message>
<message>
<source>Keep at most <n> unconnectable transactions in memory (default: %u)</source>
<translation>Mantener como máximo <n> transacciones no conectables en memoria (predeterminado: %u)</translation>
</message>
<message>
<source>Last Obfuscation was too recent.</source>
<translation>La última Ofuscación fue demasiado reciente.</translation>
</message>
<message>
<source>Last successful Obfuscation action was too recent.</source>
<translation>La última acción de Ofuscación exitosa fue demasiado reciente.</translation>
</message>
<message>
<source>Limit size of signature cache to <n> entries (default: %u)</source>
<translation>Limitar el tamaño de la caché de firmas a <n> entradas (predeterminado: %u)</translation>
</message>
<message>
<source>Line: %d</source>
<translation>Línea: %d</translation>
</message>
<message>
<source>Listen for JSON-RPC connections on <port> (default: %u or testnet: %u)</source>
<translation>Escuchar conexiones JSON-RPC en <port> (predeterminado: %u o testnet: %u)</translation>
</message>
<message>
<source>Listen for connections on <port> (default: %u or testnet: %u)</source>
<translation>Escuchar conexiones en <port> (predeterminado: %u o testnet: %u)</translation>
</message>
<message>
<source>Loading addresses...</source>
<translation>Cargando direcciones...</translation>
</message>
<message>
<source>Loading block index...</source>
<translation>Cargando índice de bloque...</translation>
</message>
<message>
<source>Loading budget cache...</source>
<translation>Cargando caché de presupuestos...</translation>
</message>
<message>
<source>Loading masternode cache...</source>
<translation>Cargando caché de nodos maestros...</translation>
</message>
<message>
<source>Loading masternode payment cache...</source>
<translation>Cargando caché de pagos de nodos maestros...</translation>
</message>
<message>
<source>Loading sporks...</source>
<translation>Cargando sporks...</translation>
</message>
<message>
<source>Loading wallet... (%3.2f %%)</source>
<translation>Cargando monedero... (%3.2f %%)</translation>
</message>
<message>
<source>Loading wallet...</source>
<translation>Cargando monedero...</translation>
</message>
<message>
<source>Location of the auth cookie (default: data dir)</source>
<translation>Ubicación de la cookie de autenticación (predeterminado: data dir)</translation>
</message>
<message>
<source>Lock is already in place.</source>
<translation>El bloqueo ya está en su lugar.</translation>
</message>
<message>
<source>Lock masternodes from masternode configuration file (default: %u)</source>
<translation>Bloquear nodos maestros desde el archivo de configuración de nodo maestro (predeterminado: %u)</translation>
</message>
<message>
<source>Lookup(): Invalid -proxy address or hostname: '%s'</source>
<translation>Lookup(): Dirección -proxy o nombre de host inválido: '%s'</translation>
</message>
<message>
<source>Maintain at most <n> connections to peers (default: %u)</source>
<translation>Mantener como máximo <n> conexiones a pares (predeterminado: %u)</translation>
</message>
<message>
<source>Masternode options:</source>
<translation>Opciones de Masternode:</translation>
</message>
<message>
<source>Masternode queue is full.</source>
<translation>La cola del Masternode está llena.</translation>
</message>
<message>
<source>Masternode:</source>
<translation>Masternode:</translation>
</message>
<message>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)</source>
<translation>Buffer de recepción máximo por conexión, <n>*1000 bytes (predeterminado: %u)</translation>
</message>
<message>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: %u)</source>
<translation>Máximo buffer de envío por conexión, <n>*1000 bytes (predeterminado: %u)</translation>
</message>
<message>
<source>Mint did not make it into blockchain</source>
<translation>La acuñación no llegó a la cadena de bloques</translation>
</message>
<message>
<source>Missing input transaction information.</source>
<translation>Falta información de la transacción de entrada.</translation>
</message>
<message>
<source>Mixing in progress...</source>
<translation>Mezcla en progreso...</translation>
</message>
<message>
<source>Need address because change is not exact</source>
<translation>Necesita dirección porque el cambio no es exacto</translation>
</message>
<message>
<source>Need to specify a port with -whitebind: '%s'</source>
<translation>Necesita especificar un puerto con -whitebind: '%s'</translation>
</message>
<message>
<source>No Masternodes detected.</source>
<translation>No se han detectado Masternodes.</translation>
</message>
<message>
<source>No compatible Masternode found.</source>
<translation>No se encontró un Masternode compatible.</translation>
</message>
<message>
<source>No funds detected in need of denominating.</source>
<translation>No se detectaron fondos que necesiten denominación.</translation>
</message>
<message>
<source>No matching denominations found for mixing.</source>
<translation>No se encontraron denominaciones coincidentes para la mezcla.</translation>
</message>
<message>
<source>Node relay options:</source>
<translation>Opciones de transmisión del nodo:</translation>
</message>
<message>
<source>Non-standard public key detected.</source>
<translation>Se ha detectado una clave pública no estándar.</translation>
</message>
<message>
<source>Not compatible with existing transactions.</source>
<translation>No es compatible con las transacciones existentes.</translation>
</message>
<message>
<source>Not enough file descriptors available.</source>
<translation>No hay suficientes descriptores de archivo disponibles.</translation>
</message>
<message>
<source>Not in the Masternode list.</source>
<translation>No está en la lista de Masternodes.</translation>
</message>
<message>
<source>Number of automatic wallet backups (default: 10)</source>
<translation>Número de Copias de seguridad automáticas de monedero (predeterminado: 10)</translation>
</message>
<message>
<source>Number of custom location backups to retain (default: %d)</source>
<translation>Número de copias de seguridad personalizadas que se deben retener (predeterminado: %d)</translation>
</message>
<message>
<source>Obfuscation is idle.</source>
<translation>La Ofuscación está sin uso.</translation>
</message>
<message>
<source>Obfuscation request complete:</source>
<translation>Pedido de Ofuscación completado:</translation>
</message>
<message>
<source>Obfuscation request incomplete:</source>
<translation>Pedido de Ofuscación incompleto:</translation>
</message>
<message>
<source>Only accept block chain matching built-in checkpoints (default: %u)</source>
<translation>Solo aceptar una cadena de bloques que coincida con los puntos de control integrados (predeterminado: %u)</translation>
</message>
<message>
<source>Only connect to nodes in network <net> (ipv4, ipv6 or onion)</source>
<translation>Solo conectar a nodos en la red <net> (ipv4, ipv6 o onion)</translation>
</message>
<message>
<source>Options:</source>
<translation>Opciones:</translation>
</message>
<message>
<source>Password for JSON-RPC connections</source>
<translation>Contraseña para conexiones JSON-RPC</translation>
</message>
<message>
<source>isValid(): Invalid -proxy address or hostname: '%s'</source>
<translation>isValid(): Dirección -proxy o nombre de host inválido: '%s'</translation>
</message>
<message>
<source>Preparing for resync...</source>
<translation>Preparando para resincronizar...</translation>
</message>
<message>
<source>Prepend debug output with timestamp (default: %u)</source>
<translation>Anteponer marca de tiempo a la salida de depuración (predeterminado: %u)</translation>
</message>
<message>
<source>Print version and exit</source>
<translation>Mostrar versión y salir</translation>
</message>
<message>
<source>RPC server options:</source>
<translation>Opciones del servidor RPC:</translation>
</message>
<message>
<source>Randomly drop 1 of every <n> network messages</source>
<translation>Descartar aleatoriamente 1 de cada <n> mensajes de red</translation>
</message>
<message>
<source>Randomly fuzz 1 of every <n> network messages</source>
<translation>Alterar aleatoriamente (fuzz) 1 de cada <n> mensajes de red</translation>
</message>
<message>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>Reconstruir el índice de la cadena de bloques desde los archivos blk000??.dat actuales</translation>
</message>
<message>
<source>Receive and display P2P network alerts (default: %u)</source>
<translation>Recibir y mostrar alertas de red P2P (predeterminado: %u)</translation>
</message>
<message>
<source>Reindex the accumulator database</source>
<translation>Reindexar la base de datos del acumulador</translation>
</message>
<message>
<source>Relay and mine data carrier transactions (default: %u)</source>
<translation>Transmitir y minar transacciones de transporte de datos (predeterminado: %u)</translation>
</message>
<message>
<source>Relay non-P2SH multisig (default: %u)</source>
<translation>Transmitir multisig no P2SH (predeterminado: %u)</translation>
</message>
<message>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Reescanear la cadena de bloques en busca de transacciones perdidas del monedero</translation>
</message>
<message>
<source>Rescanning...</source>
<translation>Reescaneando...</translation>
</message>
<message>
<source>ResetMintZerocoin finished: </source>
<translation>ResetMintZerocoin finalizado:</translation>
</message>
<message>
<source>ResetSpentZerocoin finished: </source>
<translation>ResetSpentZerocoin finalizado:</translation>
</message>
<message>
<source>Run a thread to flush wallet periodically (default: %u)</source>
<translation>Ejecutar un hilo para volcar el monedero a disco periódicamente (predeterminado: %u)</translation>
</message>
<message>
<source>Run in the background as a daemon and accept commands</source>
<translation>Ejecutar en segundo plano como demonio y aceptar comandos</translation>
</message>
<message>
<source>Send transactions as zero-fee transactions if possible (default: %u)</source>
<translation>Enviar transacciones como transacciones cero-comisión si es posible (predeterminado: %u)</translation>
</message>
<message>
<source>Session not complete!</source>
<translation>¡Sesión no completada!</translation>
</message>
<message>
<source>Session timed out.</source>
<translation>La sesión expiró.</translation>
</message>
<message>
<source>Set database cache size in megabytes (%d to %d, default: %d)</source>
<translation>Definir el tamaño de la caché de la base de datos en megabytes (%d a %d, predeterminado: %d)</translation>
</message>
<message>
<source>Set external address:port to get to this masternode (example: %s)</source>
<translation>Definir dirección:puerto externo para alcanzar este nodo maestro (ejemplo: %s)</translation>
</message>
<message>
<source>Set key pool size to <n> (default: %u)</source>
<translation>Definir el tamaño del pool de llaves a <n> (predeterminado: %u)</translation>
</message>
<message>
<source>Set maximum block size in bytes (default: %d)</source>
<translation>Ajustar el tamaño máximo de bloque en bytes (predeterminado: %d)</translation>
</message>
<message>
<source>Set minimum block size in bytes (default: %u)</source>
<translation>Ajustar el tamaño mínimo de bloque en bytes (predeterminado: %u)</translation>
</message>
<message>
<source>Set the Maximum reorg depth (default: %u)</source>
<translation>Establezca la profundidad máxima de reorganización (valor predeterminado: %u)</translation>
</message>
<message>
<source>Set the masternode private key</source>
<translation>Ajustar la clave privada de masternode</translation>
</message>
<message>
<source>Set the number of threads to service RPC calls (default: %d)</source>
<translation>Definir el número de hilos para llamadas al servicio RPC (predeterminado: %d)</translation>
</message>
<message>
<source>Sets the DB_PRIVATE flag in the wallet db environment (default: %u)</source>
<translation>Establece la bandera DB_PRIVATE en el entorno de la base de datos del monedero (predeterminado: %u)</translation>
</message>
<message>
<source>Show all debugging options (usage: --help -help-debug)</source>
<translation>Mostrar todas las opciones de depuración (uso: --help -help-debug)</translation>
</message>
<message>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Encoger el archivo debug.log al iniciar el cliente (predeterminado: 1 cuando no hay -debug)</translation>
</message>
<message>
<source>Signing failed.</source>
<translation>La firma falló.</translation>
</message>
<message>
<source>Signing timed out.</source>
<translation>Se agotó el tiempo para la firma.</translation>
</message>
<message>
<source>Signing transaction failed</source>
<translation>La firma de la transacción falló</translation>
</message>
<message>
<source>Specify configuration file (default: %s)</source>
<translation>Especifique el archivo de configuración (predeterminado: %s)</translation>
</message>
<message>
<source>Specify connection timeout in milliseconds (minimum: 1, default: %d)</source>
<translation>Especificar el tiempo de espera de conexión en milisegundos (mínimo: 1, predeterminado: %d)</translation>
</message>
<message>
<source>Specify data directory</source>
<translation>Especifique el directorio de datos</translation>
</message>
<message>
<source>Specify masternode configuration file (default: %s)</source>
<translation>Especifique el archivo de configuración masternode (predeterminado: %s)</translation>
</message>
<message>
<source>Specify pid file (default: %s)</source>
<translation>Especifique el archivo pid (predeterminado: %s)</translation>
</message>
<message>
<source>Specify wallet file (within data directory)</source>
<translation>Especifique el archivo del monedero (dentro del directorio de datos)</translation>
</message>
<message>
<source>Specify your own public address</source>
<translation>Especifique su propia dirección pública</translation>
</message>
<message>
<source>Spend Valid</source>
<translation>Gasto válido</translation>
</message>
<message>
<source>Spend unconfirmed change when sending transactions (default: %u)</source>
<translation>Gastar cambio no confirmado cuando se envían transacciones (predeterminado: %u)</translation>
</message>
<message>
<source>Staking options:</source>
<translation>Opciones de recompensa por participación:</translation>
</message>
<message>
<source>Stop running after importing blocks from disk (default: %u)</source>
<translation>Parar la ejecución después de importar bloques desde el disco (predeterminado: %u)</translation>
</message>
<message>
<source>Submitted following entries to masternode: %u / %d</source>
<translation>Se enviaron las siguientes entradas a masternode: %u / %d</translation>
</message>
<message>
<source>Submitted to masternode, waiting for more entries ( %u / %d ) %s</source>
<translation>Enviado a masternode, esperando más entradas (%u / %d) %s</translation>
</message>
<message>
<source>Submitted to masternode, waiting in queue %s</source>
<translation>Enviado a masternode, esperando en la cola %s</translation>
</message>
<message>
<source>Synchronization failed</source>
<translation>Falló la sincronización</translation>
</message>
<message>
<source>Synchronization finished</source>
<translation>Sincronización finalizada</translation>
</message>
<message>
<source>Synchronization pending...</source>
<translation>Sincronización pendiente...</translation>
</message>
<message>
<source>Synchronizing budgets...</source>
<translation>Sincronizando presupuestos...</translation>
</message>
<message>
<source>Synchronizing masternode winners...</source>
<translation>Sincronizando ganadores masternode...</translation>
</message>
<message>
<source>Synchronizing masternodes...</source>
<translation>Sincronizando masternodes...</translation>
</message>
<message>
<source>Synchronizing sporks...</source>
<translation>Sincronizando sporks...</translation>
</message>
<message>
<source>Syncing zGTR wallet...</source>
<translation>Sincronizando el monedero zGTR...</translation>
</message>
<message>
<source>The coin spend has been used</source>
<translation>El gasto de moneda se ha usado</translation>
</message>
<message>
<source>The new spend coin transaction did not verify</source>
<translation>La nueva transacción de gasto de moneda no se verificó</translation>
</message>
<message>
<source>The selected mint coin is an invalid coin</source>
<translation>La moneda acuñada seleccionada es una moneda no válida</translation>
</message>
<message>
<source>The transaction did not verify</source>
<translation>La transacción no se verificó</translation>
</message>
<message>
<source>This help message</source>
<translation>Este mensaje de ayuda</translation>
</message>
<message>
<source>This is experimental software.</source>
<translation>Esto es software experimental.</translation>
</message>
<message>
<source>This is intended for regression testing tools and app development.</source>
<translation>Esto está destinado a herramientas de prueba de regresión y desarrollo de aplicaciones.</translation>
</message>
<message>
<source>This is not a Masternode.</source>
<translation>Este no es un nodo maestro.</translation>
</message>
<message>
<source>Threshold for disconnecting misbehaving peers (default: %u)</source>
<translation>Límite para desconectar pares con mal comportamiento (predeterminado: %u)</translation>
</message>
<message>
<source>Too many spends needed</source>
<translation>Demasiados gastos necesarios</translation>
</message>
<message>
<source>Tor control port password (default: empty)</source>
<translation>Contraseña del puerto de control Tor (por defecto: vacío)</translation>
</message>
<message>
<source>Tor control port to use if onion listening enabled (default: %s)</source>
<translation>Puerto de control Tor a utilizar si está activada la escucha Onion (por defecto: %s)</translation>
</message>
<message>
<source>Transaction Created</source>
<translation>Transacción creada</translation>
</message>
<message>
<source>Transaction Mint Started</source>
<translation>Acuñación de la transacción iniciada</translation>
</message>
<message>
<source>Transaction amount too small</source>
<translation>El monto de la transacción es demasiado pequeño</translation>
</message>
<message>
<source>Transaction amounts must be positive</source>
<translation>El monto de la transacción debe ser positivo</translation>
</message>
<message>
<source>Transaction created successfully.</source>
<translation>Transacción creada satisfactoriamente.</translation>
</message>
<message>
<source>Transaction fees are too high.</source>
<translation>Las comisiones de transacción son demasiado altas.</translation>
</message>
<message>
<source>Transaction not valid.</source>
<translation>La transacción no es válida.</translation>
</message>
<message>
<source>Transaction too large for fee policy</source>
<translation>La transacción es demasiado grande para la política de comisión</translation>
</message>
<message>
<source>Transaction too large</source>
<translation>La transacción es demasiado grande</translation>
</message>
<message>
<source>Transmitting final transaction.</source>
<translation>Transmitiendo la transacción final.</translation>
</message>
<message>
<source>Try to spend with a higher security level to include more coins</source>
<translation>Intenta gastar con un nivel de seguridad más alto para incluir más monedas</translation>
</message>
<message>
<source>Trying to spend an already spent serial #, try again.</source>
<translation>Intentando gastar un número de serie ya gastado, intente de nuevo.</translation>
</message>
<message>
<source>Unable to bind to %s on this computer (bind returned error %s)</source>
<translation>No es posible enlazar a %s en este equipo (bind devolvió el error %s)</translation>
</message>
<message>
<source>Unable to find transaction containing mint</source>
<translation>No se puede encontrar la transacción que contiene la acuñación</translation>
</message>
<message>
<source>Unable to sign spork message, wrong key?</source>
<translation>Imposible firmar el mensaje spork, ¿llave equivocada?</translation>
</message>
<message>
<source>Unable to start HTTP server. See debug log for details.</source>
<translation>No se puede iniciar el servidor HTTP. Ver registro de depuración para más detalles.</translation>
</message>
<message>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Red desconocida especificada en -onlynet: '%s'</translation>
</message>
<message>
<source>Unknown state: id = %u</source>
<translation>Estado desconocido: id = %u</translation>
</message>
<message>
<source>Upgrade wallet to latest format</source>
<translation>Actualizar el monedero al último formato</translation>
</message>
<message>
<source>Use UPnP to map the listening port (default: %u)</source>
<translation>Usar UPnP para mapear el puerto de escucha (predeterminado: %u)</translation>
</message>
<message>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Usar UPnP para mapear el puerto de escucha (predeterminado: 1 cuando está a la escucha)</translation>
</message>
<message>
<source>Use a custom max chain reorganization depth (default: %u)</source>
<translation>Utilice una profundidad de reorganización de cadena máxima personalizada (valor predeterminado: %u)</translation>
</message>
<message>
<source>Use the test network</source>
<translation>Usar la red de prueba</translation>
</message>
<message>
<source>Username for JSON-RPC connections</source>
<translation>Nombre de usuario para conexiones JSON-RPC</translation>
</message>
<message>
<source>Value is below the smallest available denomination (= 1) of zGTR</source>
<translation>El valor está por debajo de la denominación más pequeña disponible (= 1) de zGTR</translation>
</message>
<message>
<source>Value more than Obfuscation pool maximum allows.</source>
<translation>Valor mayor al máximo pool de Ofuscación permitido.</translation>
</message>
<message>
<source>Verifying blocks...</source>
<translation>Verificando bloques...</translation>
</message>
<message>
<source>Verifying wallet...</source>
<translation>Verificando el monedero...</translation>
</message>
<message>
<source>Version 1 zGTR require a security level of 100 to successfully spend.</source>
<translation>La versión 1 de zGTR requiere un nivel de seguridad de 100 para gastar exitosamente.</translation>
</message>
<message>
<source>Wallet %s resides outside data directory %s</source>
<translation>El monedero %s está ubicado fuera del directorio de datos %s</translation>
</message>
<message>
<source>Wallet is locked.</source>
<translation>El monedero está bloqueado.</translation>
</message>
<message>
<source>Wallet needed to be rewritten: restart GTR Core to complete</source>
<translation>El Monedero necesita ser reescrito: reinicie GTR Core para completar</translation>
</message>
<message>
<source>Wallet options:</source>
<translation>Opciones del Monedero:</translation>
</message>
<message>
<source>Wallet window title</source>
<translation>Título de la ventana del monedero</translation>
</message>
<message>
<source>Warning</source>
<translation>Advertencia</translation>
</message>
<message>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>¡Advertencia: Esta versión es obsoleta, se requiere actualizar!</translation>
</message>
<message>
<source>Warning: Unsupported argument -benchmark ignored, use -debug=bench.</source>
<translation>Advertencia: Argumento no soportado -benchmark ignorado, use -debug=bench.</translation>
</message>
<message>
<source>Warning: Unsupported argument -debugnet ignored, use -debug=net.</source>
<translation>Advertencia: Argumento no soportado -debugnet ignorado, use -debug=net.</translation>
</message>
<message>
<source>Will retry...</source>
<translation>Se reintentará...</translation>
</message>
<message>
<source>You don't have enough Zerocoins in your wallet</source>
<translation>No tienes suficientes Zerocoins en tu monedero</translation>
</message>
<message>
<source>You need to rebuild the database using -reindex to change -txindex</source>
<translation>Usted necesita reconstruir la base de datos usando -reindex para cambiar -txindex</translation>
</message>
<message>
<source>Your entries added successfully.</source>
<translation>Sus entradas han sido agregadas satisfactoriamente.</translation>
</message>
<message>
<source>Your transaction was accepted into the pool!</source>
<translation>¡Su transacción ha sido aceptada en el pool!</translation>
</message>
<message>
<source>Zapping all transactions from wallet...</source>
<translation>Eliminando todas las transacciones del monedero...</translation>
</message>
<message>
<source>ZeroMQ notification options:</source>
<translation>Opciones de notificación ZeroMQ:</translation>
</message>
<message>
<source>Zerocoin options:</source>
<translation>Opciones Zerocoin:</translation>
</message>
<message>
<source>on startup</source>
<translation>al inicio</translation>
</message>
<message>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat está dañado, el rescate falló</translation>
</message>
</context>
</TS>
| |
workflow_inventory.py
|
import datetime
import shutil
import services.inventory
import workflow
import pandas as pd
import os
import file_system
import file_system.images as images
import json
from file_system.file_system_object import FileSystemObject
from services import inventory, library
from tabulate import tabulate
import cv2
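# Working folders (relative to the current working directory):
# TEMP_FOLDER holds review copies of potential duplicates,
# RECYCLE_BIN holds files staged for deletion so they can still be restored.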
TEMP_FOLDER = "tmp/eval"
RECYCLE_BIN = "tmp/recycle_bin"
def inventory_menu():
    library_id = prompt_for_library()
    while True:
        print("\n")
        print("###############################################")
        print("Digital Library Utility - Inventory Management ")
        print("###############################################")
        print("[0] Return to Main Menu")
        print("[1] Add/Update (Refresh) Inventory")
        print("[3] View Inventory")
        print("[4] Reconcile (Library) Inventory")
        print("[5] Update Inventory Compare Scores")
        print("[6] Manage Duplicate Inventory")
        print("[7] Restore files from Recycle Bin")
        print("[8] Classify Inventory")
        choice = input("> ")
        if choice.isnumeric() and int(choice) in range(10):
            if int(choice) == 0:
                workflow.main_menu()
            elif int(choice) == 1:  # add/update inventory
                refresh_inventory(library_id=library_id)
                reconcile_inventory(library_id=library_id, calculate_compare_score=False)
            elif int(choice) == 3:  # view inventory by library
                display_library_inventory(library_id)
            elif int(choice) == 4:  # reconcile inventory
                reconcile_inventory(library_id=library_id, calculate_compare_score=False)
            elif int(choice) == 5:  # reconcile inventory with compare score calculation
                reconcile_inventory(library_id=library_id, calculate_compare_score=True)
            elif int(choice) == 6:  # manage duplicate inventory
                refresh_inventory(library_id=library_id)
                reconcile_inventory(library_id=library_id, calculate_compare_score=True)
                get_comparable_inventory(library_id=library_id)
                move_files_to_recycle_bin(library_id=library_id)
                clear_eval_folder(TEMP_FOLDER)
                refresh_inventory(library_id=library_id)
            elif int(choice) == 7:
                restore_from_recycle_bin()
                reconcile_inventory(library_id=library_id, calculate_compare_score=False)
            elif int(choice) == 8:
                display_library_inventory(library_id)
                update_classification(library_id)
        else:
            print("Selection not valid. Please try again.")
def refresh_inventory(library_id):
    src = get_library_base_path(library_id)
    exclusion_list = ['.map', 'venv', '.pyc', '__pycache__', '.DS_Store', 'ignore', '.idea', 'git']
    restricted_list = []
    data = file_system.search(search_path=src,
                              recursive=True,
                              exclusion_list=exclusion_list,
                              restricted_list=restricted_list)
    for idx, item in enumerate(data):
        data[idx]['library_id'] = library_id
        if not data[idx]['is_hidden']:
            inventory.refresh_inventory(**data[idx])
def prompt_for_library():
    workflow.workflow_library.display_user_libraries()
    prompt = input("Select Library ID: ")
    if lib := services.library.get_library(prompt):
        return lib.library_id
    print(f"{prompt} is not a valid Library ID")
    return prompt_for_library()  # re-prompt until a valid ID is entered
def get_library_base_path(library_id):
    lib = library.get_library(library_id)
    return lib.base_path
def update_inventory_compare_scores(inventory_id, full_path):
    fso = FileSystemObject(full_path).to_dict()
    if fso and fso['is_found'] and not fso['is_hidden']:
        fso['inventory_removed_date'] = None
        inv = inventory.get_inventory_item(inventory_id)
        # Only recompute the score when it is missing or the file changed since it was scored.
        if not inv.compare_score or inv.compare_score == 0 or inv.compare_score_dt < inv.modified_dt:
            fso['compare_score'] = update_compare_score(full_path, size=fso['size'])
            fso['compare_score_dt'] = datetime.datetime.now()
        inventory.update_inventory(inventory_id, **fso)
    else:
        data = {'inventory_removed_date': datetime.datetime.now()}
        inventory.update_inventory(inventory_id, **data)
def update_compare_score(full_path, size):
    return images.calculate_compare_score(full_path, size=size)
def get_inventory(library_id):
    return inventory.get_library_inventory(library_id=library_id)
def display_all_inventory():
    results = inventory.get_all_inventory()
    df = pd.DataFrame(results)
    # df = df.drop(['_sa_instance_state'], axis=1)
    df = df.sort_values(by=['library_id', 'directory', 'full_path'])
    print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
def display_library_inventory(library_id):
    if results := inventory.get_library_inventory(library_id):
        df = pd.DataFrame(results)
        # df = df.drop(['_sa_instance_state'], axis=1)
        df = df.sort_values(by=['library_id', 'directory', 'full_path'])
        print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
    else:
        return None
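# Reconciliation marks inventory rows whose backing file has disappeared
# (is_missing=True), clears the removal flags for files that are present again,
# and can optionally refresh the compare score used for duplicate detection.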
def reconcile_inventory(library_id, calculate_compare_score: bool = False):
    # Purpose: Identify files/folders that no longer exist and update DB accordingly
    results = inventory.get_library_inventory(library_id)
    for idx, item in enumerate(results):
        if results[idx]['file']:
            src_path = results[idx]['full_path']
            inventory_id = results[idx]['inventory_id']
            fso = FileSystemObject(src_path).to_dict()
            if fso and fso['is_found'] and not fso['is_hidden']:
                data = {
                    'inventory_removed_date': None,
                    'inventory_removed_reason': None,
                    'is_missing': False
                }
            else:
                data = {'inventory_removed_date': datetime.datetime.now(),
                        'is_missing': True}
            inventory.update_inventory(inventory_id, **data)
            if calculate_compare_score:
                update_inventory_compare_scores(inventory_id, src_path)
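# Recycled files are named "<inventory_id><original extension>", so the
# inventory id can be recovered from the file name and each file moved back
# to the full_path recorded in the inventory.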
def restore_from_recycle_bin():
    path = RECYCLE_BIN
    for root, folders, files in os.walk(path, topdown=True):
        for file in files:
            recycled_file = os.path.splitext(file)[0]
            src = os.path.join(root, file)
            original_file = services.inventory.get_inventory_item(recycled_file)
            dest = original_file.full_path
            shutil.move(src, dest)
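# Duplicate detection: rows sharing a compare_score form one candidate group;
# only scores that occur more than once survive the frequency filter below.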
def get_comparable_inventory(library_id):
    try:
        if data := inventory.get_comparable_inventory(library_id):
            df = pd.DataFrame(data)
            # df = df.drop(['_sa_instance_state'], axis=1)
            df["file"] = df["file"].str.lower()
            df['compare_score_frequency'] = df.groupby('compare_score')['compare_score'].transform('count')
            df = df[df['compare_score_frequency'] > 1]
            df = df[['inventory_id', 'library_id', 'directory', 'full_path', 'file', 'file_extension',
                     'size', 'created_dt', 'modified_dt',
                     'compare_score_dt', 'compare_score', 'compare_score_frequency']]
            # df.sort_values(by=['compare_score', 'size'])
            # print(tabulate(df, headers='keys', tablefmt='psql'))
            group_duplicates(df)
            clear_eval_folder(TEMP_FOLDER)
        else:
            print("No duplicates were found.")
    except Exception as err:
        print(f"An unexpected error has occurred: {err}")
def group_duplicates(df: pd.DataFrame):
    distinct_scores = list(df['compare_score'].unique())
    count = len(distinct_scores)
    for counter, score in enumerate(distinct_scores, 1):
        sample = df[df["compare_score"] == score]
        sample = pd.DataFrame(sample, columns=['inventory_id', 'file', 'file_extension', 'full_path', 'directory',
                                               'size', 'created_dt', 'modified_dt'])
        sample.reset_index(drop=True, inplace=True)
        print("###############################################")
        print(f"Potential Duplicate Group {counter} of {count}")
        print(f"Compare Score: {score}")
        print("###############################################")
        evaluate_duplicates_by_group(sample)
def evaluate_duplicates_by_group(sample: pd.DataFrame):
    clear_eval_folder(path=TEMP_FOLDER)
    group = []
    # print(tabulate(sample.head(), headers='keys', tablefmt='psql'))
    for idx, row in sample.iterrows():
        group.append(row['inventory_id'])
        inventory_id = row['inventory_id']
        created = row['created_dt']
        modified = row['modified_dt']
        size = row['size']
        src = row['full_path']
        dest = f'{TEMP_FOLDER}/' + inventory_id + row['file_extension']
        print(f"InventoryID: {inventory_id} | File: {row['file']} | Created: {created} | "
              f"Modified: {modified} | Size: {size}")
        shutil.copy2(src, dest)
    if retain := input("Enter Inventory IDs you wish to keep (separate by comma): ").split(","):
        retain = [item.strip() for item in retain]
        for inv_id in group:
            if inv_id not in retain:
                reason = input(f"Enter reason for removal of {inv_id}: ")
                services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
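# Moves every inventory row flagged as removed into the recycle bin, using the
# "<inventory_id><extension>" naming scheme expected by restore_from_recycle_bin().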
def move_files_to_recycle_bin(library_id):
    reconcile_inventory(library_id, calculate_compare_score=False)
    if data := inventory.get_removed_inventory(library_id):
        for idx, item in enumerate(data):
            src = data[idx]['full_path']
            inventory_id = data[idx]['inventory_id']
            file_extension = data[idx]['file_extension']
            dest = f'{RECYCLE_BIN}/' + inventory_id + file_extension
            try:
                shutil.move(src, dest)
            except FileNotFoundError:
                print("A FileNotFound error has occurred.")
def remove_inventory(group: list, retain: list):
    retain = [item.strip() for item in retain]
    for inv_id in group:
        if inv_id not in retain:
            reason = input(f"Enter reason for removal of {inv_id}: ")
            services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def clear_eval_folder(path: str):
    for root, dirs, files in os.walk(path):
        for file in files:
            os.remove(os.path.join(root, file))
def select_inventory_item():
    return input("Input Inventory ID: ")


def get_inventory_item(inventory_id):
    return services.inventory.get_inventory_item(inventory_id=inventory_id)
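# Interactive tagging loop: each image is shown with OpenCV and the user enters
# comma-separated tags; cv2.waitKey(1) lets the window paint before input() blocks.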
def update_classification(library_id, incl_assignment: bool = False):
    inv = workflow.workflow_inventory.get_inventory(library_id=library_id)
    try:
        for file in inv:
            inventory_id = file['inventory_id']
            if file['is_image']:
                # inv = services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict()
                cv2.imshow(file['file'], cv2.imread(file['full_path']))
                cv2.waitKey(1)
                if file['classification']:
                    print(f"Current Tags: {file['classification']['tags']}")
                tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
                data = {
                    'inventory_id': inventory_id,
                    'classification': {'tags': tag_values},
                    'model_assignment': input("Model Assignment Name: ") if incl_assignment else file['model_assignment']
                }
                services.inventory.update_inventory_classification(**data)
                cv2.destroyAllWindows()
        cv2.destroyAllWindows()
    except Exception:
        cv2.destroyAllWindows()  # close any open viewer windows before re-raising
        raise
def update_classification_from_model(inventory_id, tags: str):
    file = workflow.workflow_inventory.get_inventory_item(inventory_id).to_dict()
    classification = file['classification']['tags'] if file['classification'] else []
    classification.append(tags)
    classification = list(set(classification))  # de-duplicate tags
    data = {
        'inventory_id': inventory_id,
        'classification': {'tags': classification}
    }
    services.inventory.update_inventory_classification(**data)
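# Hypothetical convenience entry point (assumption: this module is normally
# driven via workflow.main_menu(); this guard is illustrative only).
if __name__ == "__main__":
    inventory_menu()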
|
methylcaps_model_-checkpoint.py
|
import pandas as pd
from pymethylprocess.MethylationDataTypes import MethylationArray
from sklearn.metrics import mean_absolute_error, r2_score
import warnings
warnings.filterwarnings("ignore")
from pybedtools import BedTool
import numpy as np
from functools import reduce
from torch.utils.data import Dataset, DataLoader
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
import os
import pysnooper
import argparse
import pickle
from sklearn.metrics import classification_report
import click
import methylcapsnet
from methylcapsnet.build_capsules import *
from methylcapsnet.methylcaps_data_models import *
import sqlite3
import glob
import dask
from dask.diagnostics import ProgressBar
from pathos.multiprocessing import Pool
import multiprocessing
import dask.bag as db
from distributed import Client, LocalCluster, get_task_stream
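# Reproducibility: pin RNG seeds and force deterministic cuDNN kernels
# (disabling the autotuner trades some GPU speed for repeatable runs).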
RANDOM_SEED=42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@pysnooper.snoop('train.log')
def model_capsnet_(train_methyl_array='train_val_test_sets/train_methyl_array.pkl',
                   val_methyl_array='train_val_test_sets/val_methyl_array.pkl',
                   interest_col='disease',
                   n_epochs=10,
                   n_bins=0,
                   bin_len=1000000,
                   min_capsule_len=300,
                   primary_caps_out_len=45,
                   caps_out_len=45,
                   hidden_topology='30,80,50',
                   gamma=1e-2,
                   decoder_topology='100,300',
                   learning_rate=1e-2,
                   routing_iterations=3,
                   overlap=0.,
                   custom_loss='none',
                   gamma2=1e-2,
                   job=0,
                   capsule_choice=['genomic_binned'],
                   custom_capsule_file='',
                   test_methyl_array='',
                   predict=False,
                   batch_size=16,
                   limited_capsule_names_file='',
                   gsea_superset='',
                   tissue='',
                   number_sets=25,
                   use_set=False,
                   gene_context=False,
                   select_subtypes=[],
                   fit_spw=False,
                   l1_l2='',
                   custom_capsule_file2='',
                   min_capsules=5):
capsule_choice=list(capsule_choice)
#custom_capsule_file=list(custom_capsule_file)
hlt_list=list(filter(None,hidden_topology.split(','))) # materialize: a bare filter object is always truthy
if hlt_list:
hidden_topology=list(map(int,hlt_list))
else:
hidden_topology=[]
hlt_list=list(filter(None,decoder_topology.split(',')))
if hlt_list:
decoder_topology=list(map(int,hlt_list))
else:
decoder_topology=[]
hidden_caps_layers=[]
include_last=False
ma=MethylationArray.from_pickle(train_methyl_array)
ma_v=MethylationArray.from_pickle(val_methyl_array)
if test_methyl_array and predict:
ma_t=MethylationArray.from_pickle(test_methyl_array)
try:
ma.remove_na_samples(interest_col)
ma_v.remove_na_samples(interest_col)
if test_methyl_array and predict:
ma_t.remove_na_samples(interest_col)
except Exception:
pass
if select_subtypes:
print(ma.pheno[interest_col].unique())
ma.pheno=ma.pheno.loc[ma.pheno[interest_col].isin(select_subtypes)]
ma.beta=ma.beta.loc[ma.pheno.index]
ma_v.pheno=ma_v.pheno.loc[ma_v.pheno[interest_col].isin(select_subtypes)]
ma_v.beta=ma_v.beta.loc[ma_v.pheno.index]
print(ma.pheno[interest_col].unique())
if test_methyl_array and predict:
ma_t.pheno=ma_t.pheno.loc[ma_t.pheno[interest_col].isin(select_subtypes)]
ma_t.beta=ma_t.beta.loc[ma_t.pheno.index]
if custom_capsule_file2 and os.path.exists(custom_capsule_file2):
capsules_dict=torch.load(custom_capsule_file2)
final_modules, modulecpgs, module_names=capsules_dict['final_modules'], capsules_dict['modulecpgs'], capsules_dict['module_names']
if min_capsule_len>1:
include_capsules=[len(x)>min_capsule_len for x in final_modules]
final_modules=[final_modules[i] for i in range(len(final_modules)) if include_capsules[i]]
module_names=[module_names[i] for i in range(len(module_names)) if include_capsules[i]]
modulecpgs=(reduce(np.union1d,final_modules)).tolist()
else:
final_modules, modulecpgs, module_names=build_capsules(capsule_choice,
overlap,
bin_len,
ma,
include_last,
min_capsule_len,
custom_capsule_file,
gsea_superset,
tissue,
gene_context,
use_set,
number_sets,
limited_capsule_names_file)
if custom_capsule_file2:
torch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names),custom_capsule_file2)
assert len(final_modules) >= min_capsules, "Fewer capsules than the allowed minimum."
if fit_spw:
modulecpgs=list(reduce(lambda x,y:np.hstack((x,y)),final_modules))
if not include_last: # ERROR HAPPENS HERE!
ma.beta=ma.beta.loc[:,modulecpgs]
ma_v.beta=ma_v.beta.loc[:,modulecpgs]
if test_methyl_array and predict:
ma_t.beta=ma_t.beta.loc[:,modulecpgs]
# https://github.com/higgsfield/Capsule-Network-Tutorial/blob/master/Capsule%20Network.ipynb
original_interest_col=interest_col
if n_bins:
new_interest_col=interest_col+'_binned'
ma.pheno.loc[:,new_interest_col],bins=pd.cut(ma.pheno[interest_col],bins=n_bins,retbins=True)
ma_v.pheno.loc[:,new_interest_col],_=pd.cut(ma_v.pheno[interest_col],bins=bins,retbins=True)
if test_methyl_array and predict:
ma_t.pheno.loc[:,new_interest_col],_=pd.cut(ma_t.pheno[interest_col],bins=bins,retbins=True)
interest_col=new_interest_col
datasets=dict()
datasets['train']=MethylationDataset(ma,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
print(datasets['train'].X.isnull().sum().sum())
datasets['val']=MethylationDataset(ma_v,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
if test_methyl_array and predict:
datasets['test']=MethylationDataset(ma_t,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
dataloaders=dict()
dataloaders['train']=DataLoader(datasets['train'],batch_size=batch_size,shuffle=True,num_workers=8, pin_memory=True, drop_last=True)
dataloaders['val']=DataLoader(datasets['val'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)
n_primary=len(final_modules)
if test_methyl_array and predict:
dataloaders['test']=DataLoader(datasets['test'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)
n_inputs=list(map(len,final_modules))
n_out_caps=len(datasets['train'].y_unique)
if not fit_spw:
print("Not fitting MethylSPWNet")
primary_caps = PrimaryCaps(modules=final_modules,hidden_topology=hidden_topology,n_output=primary_caps_out_len)
hidden_caps = []
output_caps = CapsLayer(n_out_caps,n_primary,primary_caps_out_len,caps_out_len,routing_iterations=routing_iterations)
decoder = Decoder(n_out_caps*caps_out_len,len(list(ma.beta)),decoder_topology)
model = CapsNet(primary_caps, hidden_caps, output_caps, decoder, gamma=gamma)
if test_methyl_array and predict:
model.load_state_dict(torch.load('capsnet_model.pkl'))
else:
print("Fitting MethylSPWNet")
module_lens=[len(x) for x in final_modules]
model=MethylSPWNet(module_lens, hidden_topology, dropout_p=0.2, n_output=n_out_caps)
if test_methyl_array and predict:
model.load_state_dict(torch.load('spwnet_model.pkl'))
if torch.cuda.is_available():
model=model.cuda()
# extract all c_ij for all layers across all batches, or just last batch
if l1_l2 and fit_spw:
l1,l2=list(map(float,l1_l2.split(',')))
elif fit_spw:
l1,l2=0.,0.
trainer=Trainer(model=model,
validation_dataloader=dataloaders['val'],
n_epochs=n_epochs,
lr=learning_rate,
n_primary=n_primary,
custom_loss=custom_loss,
gamma2=gamma2,
spw_mode=fit_spw,
l1=l1 if fit_spw else 0.,
l2=l2 if fit_spw else 0.)
if not predict:
try:
trainer.fit(dataloader=dataloaders['train'])
val_loss=min(trainer.val_losses)
torch.save(trainer.model.state_dict(),'capsnet_model.pkl' if not fit_spw else 'spwnet_model.pkl')
if fit_spw:
torch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names), 'spwnet_capsules.pkl')
torch.save(dict(module_names=module_names,module_lens=module_lens,dropout_p=0.2,hidden_topology=hidden_topology,n_output=n_out_caps),'spwnet_config.pkl')
except Exception as e:
print(e)
val_loss=-2
with sqlite3.connect('jobs.db', check_same_thread=False) as conn:
pd.DataFrame([job,val_loss],index=['job','val_loss'],columns=[0]).T.to_sql('val_loss',conn,if_exists='append')
else:
if test_methyl_array:
trainer.weights=1.
Y=trainer.predict(dataloaders['test'])
pickle.dump(Y,open('predictions.pkl','wb'))
val_loss=-1
return val_loss
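# Minimal invocation sketch (an assumption, not from the original source; it
# presumes the default pickled MethylationArray splits above exist on disk):
# if __name__ == '__main__':
#     model_capsnet_(interest_col='disease', n_epochs=10)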
|
lib.rs
|
#![allow(clippy::module_inception)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::ptr_arg)]
#![allow(clippy::large_enum_variant)]
#![doc = "generated by AutoRust 0.1.0"]
#[cfg(feature = "package-2021-03-12-preview")]
pub mod package_2021_03_12_preview;
#[cfg(all(feature = "package-2021-03-12-preview", not(feature = "no-default-version")))]
pub use package_2021_03_12_preview::{models, operations, operations::Error};
#[cfg(feature = "package-2021-06-15-preview")]
pub mod package_2021_06_15_preview;
use azure_core::setters;
#[cfg(all(feature = "package-2021-06-15-preview", not(feature = "no-default-version")))]
pub use package_2021_06_15_preview::{models, operations, operations::Error};
pub fn config(
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
token_credential: Box<dyn azure_core::TokenCredential>,
) -> OperationConfigBuilder {
OperationConfigBuilder {
http_client,
base_path: None,
token_credential,
token_credential_resource: None,
}
}
pub struct OperationConfigBuilder {
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
base_path: Option<String>,
token_credential: Box<dyn azure_core::TokenCredential>,
token_credential_resource: Option<String>,
}
impl OperationConfigBuilder {
setters! { base_path : String => Some (base_path) , token_credential_resource : String => Some (token_credential_resource) , }
pub fn build(self) -> OperationConfig {
OperationConfig {
http_client: self.http_client,
base_path: self.base_path.unwrap_or_else(|| "https://management.azure.com".to_owned()),
token_credential: Some(self.token_credential),
token_credential_resource: self
.token_credential_resource
.unwrap_or_else(|| "https://management.azure.com/".to_owned()),
}
}
}
pub struct OperationConfig {
http_client: std::sync::Arc<dyn azure_core::HttpClient>,
base_path: String,
token_credential: Option<Box<dyn azure_core::TokenCredential>>,
token_credential_resource: String,
}
impl OperationConfig {
pub fn http_client(&self) -> &dyn azure_core::HttpClient {
self.http_client.as_ref()
}
pub fn base_path(&self) -> &str {
self.base_path.as_str()
}
pub fn token_credential(&self) -> Option<&dyn azure_core::TokenCredential> {
self.token_credential.as_deref()
}
pub fn token_credential_resource(&self) -> &str {
self.token_credential_resource.as_str()
}
}
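// Illustrative usage (a sketch, not part of the generated crate; `http_client`
// and `credential` stand in for concrete azure_core values):
//
// let operation_config = config(http_client, Box::new(credential))
//     .base_path("https://management.azure.com".to_owned())
//     .build();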
|
dashboard_app.js
|
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import _ from 'lodash';
import React from 'react';
import angular from 'angular';
import { uiModules } from 'ui/modules';
import chrome from 'ui/chrome';
import { applyTheme } from 'ui/theme';
import { toastNotifications } from 'ui/notify';
import 'ui/query_bar';
import { panelActionsStore } from './store/panel_actions_store';
import { getDashboardTitle } from './dashboard_strings';
import { DashboardViewMode } from './dashboard_view_mode';
import { TopNavIds } from './top_nav/top_nav_ids';
import { ConfirmationButtonTypes } from 'ui/modals/confirm_modal';
import { FilterBarQueryFilterProvider } from 'ui/filter_bar/query_filter';
import { DocTitleProvider } from 'ui/doc_title';
import { getTopNavConfig } from './top_nav/get_top_nav_config';
import { DashboardConstants, createDashboardEditUrl } from './dashboard_constants';
import { DashboardStateManager } from './dashboard_state_manager';
import { saveDashboard } from './lib';
import { showCloneModal } from './top_nav/show_clone_modal';
import { showSaveModal } from 'ui/saved_objects/show_saved_object_save_modal';
import { DashboardSaveModal } from './top_nav/save_modal';
import { showAddPanel } from './top_nav/show_add_panel';
import { showOptionsPopover } from './top_nav/show_options_popover';
import { showNewVisModal } from '../visualize/wizard';
import { showShareContextMenu, ShareContextMenuExtensionsRegistryProvider } from 'ui/share';
import { migrateLegacyQuery } from 'ui/utils/migrate_legacy_query';
import * as filterActions from 'ui/doc_table/actions/filter';
import { FilterManagerProvider } from 'ui/filter_manager';
import { EmbeddableFactoriesRegistryProvider } from 'ui/embeddable/embeddable_factories_registry';
import { ContextMenuActionsRegistryProvider } from 'ui/embeddable';
import { VisTypesRegistryProvider } from 'ui/registry/vis_types';
import { timefilter } from 'ui/timefilter';
import { getUnhashableStatesProvider } from 'ui/state_management/state_hashing';
import { DashboardViewportProvider } from './viewport/dashboard_viewport_provider';
const app = uiModules.get('app/dashboard', [
'elasticsearch',
'ngRoute',
'react',
'kibana/courier',
'kibana/config',
]);
app.directive('dashboardViewportProvider', function (reactDirective) {
return reactDirective(DashboardViewportProvider);
});
app.directive('dashboardApp', function ($injector) {
const courier = $injector.get('courier');
const AppState = $injector.get('AppState');
const kbnUrl = $injector.get('kbnUrl');
const confirmModal = $injector.get('confirmModal');
const config = $injector.get('config');
const Private = $injector.get('Private');
return {
restrict: 'E',
controllerAs: 'dashboardApp',
controller: function (
$scope,
$rootScope,
$route,
$routeParams,
getAppState,
dashboardConfig,
localStorage,
i18n,
) {
const filterManager = Private(FilterManagerProvider);
const filterBar = Private(FilterBarQueryFilterProvider);
const docTitle = Private(DocTitleProvider);
const embeddableFactories = Private(EmbeddableFactoriesRegistryProvider);
const panelActionsRegistry = Private(ContextMenuActionsRegistryProvider);
const getUnhashableStates = Private(getUnhashableStatesProvider);
const shareContextMenuExtensions = Private(ShareContextMenuExtensionsRegistryProvider);
panelActionsStore.initializeFromRegistry(panelActionsRegistry);
const visTypes = Private(VisTypesRegistryProvider);
$scope.getEmbeddableFactory = panelType => embeddableFactories.byName[panelType];
const dash = $scope.dash = $route.current.locals.dash;
if (dash.id) {
docTitle.change(dash.title);
}
const dashboardStateManager = new DashboardStateManager({
savedDashboard: dash,
AppState,
hideWriteControls: dashboardConfig.getHideWriteControls(),
addFilter: ({ field, value, operator, index }) => {
filterActions.addFilter(field, value, operator, index, dashboardStateManager.getAppState(), filterManager);
}
});
$scope.getDashboardState = () => dashboardStateManager;
$scope.appState = dashboardStateManager.getAppState();
// The 'previouslyStored' check is so we only update the time filter on dashboard open, not during
// normal cross app navigation.
if (dashboardStateManager.getIsTimeSavedWithDashboard() && !getAppState.previouslyStored()) {
dashboardStateManager.syncTimefilterWithDashboard(timefilter, config.get('timepicker:quickRanges'));
}
const updateState = () => {
// Following the "best practice" of always have a '.' in your ng-models –
// https://github.com/angular/angular.js/wiki/Understanding-Scopes
$scope.model = {
query: dashboardStateManager.getQuery(),
timeRestore: dashboardStateManager.getTimeRestore(),
title: dashboardStateManager.getTitle(),
description: dashboardStateManager.getDescription(),
};
$scope.panels = dashboardStateManager.getPanels();
$scope.indexPatterns = dashboardStateManager.getPanelIndexPatterns();
};
// Part of the exposed plugin API - do not remove without careful consideration.
this.appStatus = {
dirty: !dash.id
};
dashboardStateManager.registerChangeListener(status => {
this.appStatus.dirty = status.dirty || !dash.id;
updateState();
});
dashboardStateManager.applyFilters(
dashboardStateManager.getQuery() || {
query: '',
language: localStorage.get('kibana.userQueryLanguage') || config.get('search:queryLanguage')
},
filterBar.getFilters()
);
timefilter.enableAutoRefreshSelector();
timefilter.enableTimeRangeSelector();
updateState();
$scope.refresh = () => {
$rootScope.$broadcast('fetch');
courier.fetch();
};
dashboardStateManager.handleTimeChange(timefilter.getTime());
$scope.expandedPanel = null;
$scope.dashboardViewMode = dashboardStateManager.getViewMode();
$scope.landingPageUrl = () => `#${DashboardConstants.LANDING_PAGE_PATH}`;
$scope.hasExpandedPanel = () => $scope.expandedPanel !== null;
$scope.getDashTitle = () => getDashboardTitle(
dashboardStateManager.getTitle(),
dashboardStateManager.getViewMode(),
dashboardStateManager.getIsDirty(timefilter));
// Push breadcrumbs to new header navigation
const updateBreadcrumbs = () => {
chrome.breadcrumbs.set([
{
text: i18n('kbn.dashboard.dashboardAppBreadcrumbsTitle', {
defaultMessage: 'Dashboard',
}),
href: $scope.landingPageUrl()
},
{ text: $scope.getDashTitle() }
]);
};
updateBreadcrumbs();
dashboardStateManager.registerChangeListener(updateBreadcrumbs);
config.watch('k7design', (val) => $scope.showPluginBreadcrumbs = !val);
$scope.newDashboard = () => { kbnUrl.change(DashboardConstants.CREATE_NEW_DASHBOARD_URL, {}); };
$scope.saveState = () => dashboardStateManager.saveState();
$scope.getShouldShowEditHelp = () => (
!dashboardStateManager.getPanels().length &&
dashboardStateManager.getIsEditMode() &&
!dashboardConfig.getHideWriteControls()
);
$scope.getShouldShowViewHelp = () => (
!dashboardStateManager.getPanels().length &&
dashboardStateManager.getIsViewMode() &&
!dashboardConfig.getHideWriteControls()
);
$scope.minimizeExpandedPanel = () => {
$scope.expandedPanel = null;
};
$scope.expandPanel = (panelIndex) => {
$scope.expandedPanel =
dashboardStateManager.getPanels().find((panel) => panel.panelIndex === panelIndex);
};
$scope.updateQueryAndFetch = function (query) {
const oldQuery = $scope.model.query;
if (_.isEqual(oldQuery, query)) {
// The user can still request a reload in the query bar, even if the
// query is the same, and in that case, we have to explicitly ask for
// a reload, since no state changes will cause it.
dashboardStateManager.requestReload();
} else {
$scope.model.query = migrateLegacyQuery(query);
dashboardStateManager.applyFilters($scope.model.query, filterBar.getFilters());
}
$scope.refresh();
};
updateTheme();
$scope.indexPatterns = [];
$scope.onPanelRemoved = (panelIndex) => {
dashboardStateManager.removePanel(panelIndex);
$scope.indexPatterns = dashboardStateManager.getPanelIndexPatterns();
};
$scope.$watch('model.query', $scope.updateQueryAndFetch);
$scope.$listenAndDigestAsync(timefilter, 'fetch', () => {
dashboardStateManager.handleTimeChange(timefilter.getTime());
// Currently discover relies on this logic to re-fetch. We need to refactor it to rely instead on the
// directly passed down time filter. Then we can get rid of this reliance on scope broadcasts.
$scope.refresh();
});
function updateViewMode(newMode) {
$scope.topNavMenu = getTopNavConfig(newMode, navActions, dashboardConfig.getHideWriteControls()); // eslint-disable-line no-use-before-define
dashboardStateManager.switchViewMode(newMode);
$scope.dashboardViewMode = newMode;
}
const onChangeViewMode = (newMode) => {
const isPageRefresh = newMode === dashboardStateManager.getViewMode();
const isLeavingEditMode = !isPageRefresh && newMode === DashboardViewMode.VIEW;
const willLoseChanges = isLeavingEditMode && dashboardStateManager.getIsDirty(timefilter);
if (!willLoseChanges) {
updateViewMode(newMode);
return;
}
function revertChangesAndExitEditMode() {
dashboardStateManager.resetState();
kbnUrl.change(dash.id ? createDashboardEditUrl(dash.id) : DashboardConstants.CREATE_NEW_DASHBOARD_URL);
// This is only necessary for new dashboards, which will default to Edit mode.
updateViewMode(DashboardViewMode.VIEW);
// We need to do a hard reset of the timepicker. appState will not reload like
// it does on 'open' because it's been saved to the url and the getAppState.previouslyStored() check on
// reload will cause it not to sync.
if (dashboardStateManager.getIsTimeSavedWithDashboard()) {
dashboardStateManager.syncTimefilterWithDashboard(timefilter, config.get('timepicker:quickRanges'));
}
}
confirmModal(
i18n('kbn.dashboard.changeViewModeConfirmModal.discardChangesDescription',
{ defaultMessage: `Once you discard your changes, there's no getting them back.` }
),
{
onConfirm: revertChangesAndExitEditMode,
onCancel: _.noop,
confirmButtonText: i18n('kbn.dashboard.changeViewModeConfirmModal.confirmButtonLabel',
{ defaultMessage: 'Discard changes' }
),
cancelButtonText: i18n('kbn.dashboard.changeViewModeConfirmModal.cancelButtonLabel',
{ defaultMessage: 'Continue editing' }
),
defaultFocusedButton: ConfirmationButtonTypes.CANCEL,
title: i18n('kbn.dashboard.changeViewModeConfirmModal.discardChangesTitle',
{ defaultMessage: 'Discard changes to dashboard?' }
)
}
);
};
/**
* Saves the dashboard.
*
* @param {object} [saveOptions={}]
* @property {boolean} [saveOptions.confirmOverwrite=false] - If true, attempts to create the source so it
* can confirm an overwrite if a document with the id already exists.
* @property {boolean} [saveOptions.isTitleDuplicateConfirmed=false] - If true, save allowed with duplicate title
* @property {func} [saveOptions.onTitleDuplicate] - function called if duplicate title exists.
* When not provided, confirm modal will be displayed asking user to confirm or cancel save.
* @return {Promise}
* @resolved {String} - The id of the doc
*/
function save(saveOptions) {
return saveDashboard(angular.toJson, timefilter, dashboardStateManager, saveOptions)
.then(function (id) {
if (id) {
toastNotifications.addSuccess({
title: i18n('kbn.dashboard.dashboardWasSavedSuccessMessage',
{
defaultMessage: `Dashboard '{dashTitle}' was saved`,
values: { dashTitle: dash.title },
},
),
'data-test-subj': 'saveDashboardSuccess',
});
if (dash.id !== $routeParams.id) {
kbnUrl.change(createDashboardEditUrl(dash.id));
} else {
docTitle.change(dash.lastSavedTitle);
updateViewMode(DashboardViewMode.VIEW);
}
}
return { id };
}).catch((error) => {
toastNotifications.addDanger({
title: i18n('kbn.dashboard.dashboardWasNotSavedDangerMessage',
{
defaultMessage: `Dashboard '{dashTitle}' was not saved. Error: {errorMessage}`,
values: {
dashTitle: dash.title,
errorMessage: error.message,
},
},
),
'data-test-subj': 'saveDashboardFailure',
});
return { error };
});
}
$scope.showFilterBar = () => filterBar.getFilters().length > 0 || !dashboardStateManager.getFullScreenMode();
$scope.showAddPanel = () => {
dashboardStateManager.setFullScreenMode(false);
$scope.kbnTopNav.click(TopNavIds.ADD);
};
$scope.enterEditMode = () => {
dashboardStateManager.setFullScreenMode(false);
$scope.kbnTopNav.click('edit');
};
const navActions = {};
navActions[TopNavIds.FULL_SCREEN] = () =>
dashboardStateManager.setFullScreenMode(true);
navActions[TopNavIds.EXIT_EDIT_MODE] = () => onChangeViewMode(DashboardViewMode.VIEW);
navActions[TopNavIds.ENTER_EDIT_MODE] = () => onChangeViewMode(DashboardViewMode.EDIT);
navActions[TopNavIds.SAVE] = () => {
const currentTitle = dashboardStateManager.getTitle();
const currentDescription = dashboardStateManager.getDescription();
const currentTimeRestore = dashboardStateManager.getTimeRestore();
const onSave = ({ newTitle, newDescription, newCopyOnSave, newTimeRestore, isTitleDuplicateConfirmed, onTitleDuplicate }) => {
dashboardStateManager.setTitle(newTitle);
dashboardStateManager.setDescription(newDescription);
dashboardStateManager.savedDashboard.copyOnSave = newCopyOnSave;
dashboardStateManager.setTimeRestore(newTimeRestore);
const saveOptions = {
confirmOverwrite: false,
isTitleDuplicateConfirmed,
onTitleDuplicate,
};
return save(saveOptions).then(({ id, error }) => {
// If the save wasn't successful, put the original values back.
if (!id || error) {
dashboardStateManager.setTitle(currentTitle);
dashboardStateManager.setDescription(currentDescription);
dashboardStateManager.setTimeRestore(currentTimeRestore);
}
return { id, error };
});
};
const dashboardSaveModal = (
<DashboardSaveModal
onSave={onSave}
onClose={() => {}}
title={currentTitle}
description={currentDescription}
timeRestore={currentTimeRestore}
showCopyOnSave={dash.id ? true : false}
/>
);
showSaveModal(dashboardSaveModal);
};
navActions[TopNavIds.CLONE] = () => {
const currentTitle = dashboardStateManager.getTitle();
const onClone = (newTitle, isTitleDuplicateConfirmed, onTitleDuplicate) => {
dashboardStateManager.savedDashboard.copyOnSave = true;
dashboardStateManager.setTitle(newTitle);
const saveOptions = {
confirmOverwrite: false,
isTitleDuplicateConfirmed,
onTitleDuplicate,
};
return save(saveOptions).then(({ id, error }) => {
// If the save wasn't successful, put the original title back.
if (!id || error) {
dashboardStateManager.setTitle(currentTitle);
}
return { id, error };
});
};
showCloneModal(onClone, currentTitle);
};
navActions[TopNavIds.ADD] = () => {
const addNewVis = () => {
showNewVisModal(visTypes, { editorParams: [DashboardConstants.ADD_VISUALIZATION_TO_DASHBOARD_MODE_PARAM] });
};
showAddPanel(dashboardStateManager.addNewPanel, addNewVis, visTypes);
};
navActions[TopNavIds.OPTIONS] = (menuItem, navController, anchorElement) => {
showOptionsPopover({
anchorElement,
darkTheme: dashboardStateManager.getDarkTheme(),
onDarkThemeChange: (isChecked) => {
dashboardStateManager.setDarkTheme(isChecked);
updateTheme();
},
useMargins: dashboardStateManager.getUseMargins(),
onUseMarginsChange: (isChecked) => {
dashboardStateManager.setUseMargins(isChecked);
},
hidePanelTitles: dashboardStateManager.getHidePanelTitles(),
onHidePanelTitlesChange: (isChecked) => {
dashboardStateManager.setHidePanelTitles(isChecked);
},
});
};
navActions[TopNavIds.SHARE] = (menuItem, navController, anchorElement) => {
showShareContextMenu({
anchorElement,
allowEmbed: true,
getUnhashableStates,
objectId: dash.id,
objectType: 'dashboard',
shareContextMenuExtensions,
sharingData: {
title: dash.title,
},
isDirty: dashboardStateManager.getIsDirty(),
});
};
updateViewMode(dashboardStateManager.getViewMode());
// update root source when filters update
$scope.$listen(filterBar, 'update', function () {
dashboardStateManager.applyFilters($scope.model.query, filterBar.getFilters());
});
// update data when filters fire fetch event
$scope.$listen(filterBar, 'fetch', $scope.refresh);
$scope.$on('$destroy', () => {
dashboardStateManager.destroy();
// Remove dark theme to keep it from affecting the appearance of other apps.
setLightTheme();
});
function updateTheme() {
dashboardStateManager.getDarkTheme() ? setDarkTheme() : setLightTheme();
}
function setDarkTheme() {
chrome.removeApplicationClass(['theme-light']);
chrome.addApplicationClass('theme-dark');
applyTheme('dark');
}
function setLightTheme() {
chrome.removeApplicationClass(['theme-dark']);
chrome.addApplicationClass('theme-light');
applyTheme('light');
}
if ($route.current.params && $route.current.params[DashboardConstants.NEW_VISUALIZATION_ID_PARAM]) {
dashboardStateManager.addNewPanel($route.current.params[DashboardConstants.NEW_VISUALIZATION_ID_PARAM], 'visualization');
kbnUrl.removeParam(DashboardConstants.ADD_VISUALIZATION_TO_DASHBOARD_MODE_PARAM);
kbnUrl.removeParam(DashboardConstants.NEW_VISUALIZATION_ID_PARAM);
}
}
};
});
|
main.rs
|
fn main() {
    let year = 2019;
    // A year is a leap year if it is divisible by 4, except century years,
    // which must also be divisible by 400.
    if (year % 4 == 0 && year % 100 != 0) || (year % 400 == 0) {
        println!("this year is a leap year");
    } else {
        println!("this year isn't a leap year");
    }
}
|
MaterialProperty.js
|
/*global define*/
define([
'../Core/Color',
'../Core/defined',
'../Core/defineProperties',
'../Core/DeveloperError',
'../Scene/Material'
], function(
Color,
defined,
defineProperties,
DeveloperError,
Material) {
'use strict';
/**
* The interface for all {@link Property} objects that represent {@link Material} uniforms.
* This type defines an interface and cannot be instantiated directly.
*
* @alias MaterialProperty
* @constructor
*
* @see ColorMaterialProperty
* @see CompositeMaterialProperty
* @see GridMaterialProperty
* @see ImageMaterialProperty
* @see PolylineGlowMaterialProperty
* @see PolylineOutlineMaterialProperty
* @see StripeMaterialProperty
*/
function MaterialProperty() {
DeveloperError.throwInstantiationError();
}
defineProperties(MaterialProperty.prototype, {
/**
* Gets a value indicating if this property is constant. A property is considered
* constant if getValue always returns the same result for the current definition.
* @memberof MaterialProperty.prototype
*
* @type {Boolean}
* @readonly
*/
isConstant : {
get : DeveloperError.throwInstantiationError
},
/**
* Gets the event that is raised whenever the definition of this property changes.
* The definition is considered to have changed if a call to getValue would return
* a different result for the same time.
* @memberof MaterialProperty.prototype
*
* @type {Event}
* @readonly
*/
definitionChanged : {
get : DeveloperError.throwInstantiationError
}
});
/**
* Gets the {@link Material} type at the provided time.
* @function
*
* @param {JulianDate} time The time for which to retrieve the type.
* @returns {String} The type of material.
*/
MaterialProperty.prototype.getType = DeveloperError.throwInstantiationError;
/**
* Gets the value of the property at the provided time.
* @function
*
* @param {JulianDate} time The time for which to retrieve the value.
* @param {Object} [result] The object to store the value into, if omitted, a new instance is created and returned.
* @returns {Object} The modified result parameter or a new instance if the result parameter was not supplied.
*/
MaterialProperty.prototype.getValue = DeveloperError.throwInstantiationError;
/**
* Compares this property to the provided property and returns
* <code>true</code> if they are equal, <code>false</code> otherwise.
* @function
*
* @param {Property} [other] The other property.
* @returns {Boolean} <code>true</code> if left and right are equal, <code>false</code> otherwise.
*/
MaterialProperty.prototype.equals = DeveloperError.throwInstantiationError;
/**
* @private
*/
MaterialProperty.getValue = function(time, materialProperty, material) {
var type;
if (defined(materialProperty)) {
type = materialProperty.getType(time);
if (defined(type)) {
if (!defined(material) || (material.type !== type)) {
material = Material.fromType(type);
}
materialProperty.getValue(time, material.uniforms);
return material;
}
}
if (!defined(material) || (material.type !== Material.ColorType)) {
material = Material.fromType(Material.ColorType);
}
Color.clone(Color.WHITE, material.uniforms.color);
return material;
};
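// Illustrative usage (names are placeholders): resolve the concrete Material
// for the current time, reusing the previous instance when the type matches.
// this._material = MaterialProperty.getValue(time, entity.polygon.material, this._material);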
return MaterialProperty;
});
|
main.rs
|
use std::fs;
use regex::Regex;
fn count_chars(test_input: &str) -> i32 {
test_input
.split('\n')
.map(|s| s.chars().count() as i32)
.sum()
}
fn unescape(test_input: &str) -> String {
let escape_chars = ["\\", "\""];
test_input
.split('\n')
.map(|s| {
if s.len() < 2 {
"".to_string()
} else {
let end = s.len() - 1;
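// Swap escaped backslashes for a ".." sentinel so the '\\'-split below
// never sees them; ".." is turned back into a literal backslash at the end.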
let new_s = &s[1..end].replace("\\\\", "..");
new_s
.split('\\')
.enumerate()
.map(|(i, word)| {
if i == 0 || escape_chars.contains(&word) {
word.to_string()
} else {
if word.starts_with('x') {
let ord = u8::from_str_radix(&word[1..3], 16)
.unwrap();
let c = ord as char;
let mut v = word
.chars()
.collect::<Vec<char>>();
v.splice(0..3, c.to_string().chars());
v.iter().collect::<String>()
} else {
word.to_string()
}
}
})
.collect::<Vec<String>>()
.join("")
.replace("..", "\\")
}
})
.collect::<Vec<String>>()
.join("\n")
}
fn escape(test_input: &str) -> String {
let re = Regex::new(r"\\(?P<hex>x[0-9a-f]{2})").unwrap();
test_input
.split('\n')
.map(|s| {
if s.len() > 0 {
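// Sentinels avoid double-escaping while rewriting: "----" stands for an
// escaped backslash (\\) and "...." for an escaped quote (\") in the
// output; both are substituted back at the very end.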
let mut s1 = s
.replace("\\\\", "--------")
.replace("\\\"", "----....");
s1 = re.replace_all(&s1, "----$hex").to_string();
let mut s2 = String::from("\"....");
s2.push_str(&s1[1..s1.len()-1]);
s2.push_str("....\"");
s2 = s2
.replace("----", "\\\\")
.replace("....", "\\\"");
s2
} else {
s.to_string()
}
})
.collect::<Vec<String>>()
.join("\n")
}
fn solve_part1(puzzle_input: &str) -> i32 {
count_chars(&puzzle_input) - count_chars(&unescape(&puzzle_input))
}
fn solve_part2(puzzle_input: &str) -> i32 {
count_chars(&escape(&puzzle_input)) - count_chars(&puzzle_input)
}
fn main() {
let puzzle_input = fs::read_to_string("input/input.txt")
.expect("problem reading file");
let result1 = solve_part1(&puzzle_input);
println!("Part1: {}", result1);
let result2 = solve_part2(&puzzle_input);
println!("Part2: {}", result2);
}
// Tests
#[cfg(test)]
mod tests {
use super::*;
fn example_input() -> String {
String::from(
"\"\"\n\
\"abc\"\n\
\"aaa\\\"aaa\"\n\
\"\\x27\"\n"
)
}
fn create_test_input() -> String {
String::from(
"\"\"\n\
\"a\"\n\
\"abc\"\n\
\"aaa\\\"aaa\"\n\
\"bbb\\\\bbb\"\n\
\"\\x27\"\n"
)
}
#[test]
fn test_count_chars() {
let test_input = create_test_input();
let num_chars = count_chars(&test_input);
assert_eq!(num_chars, 36);
}
#[test]
fn test_count_chars_with_example_input() {
let test_input = example_input();
let num_chars = count_chars(&test_input);
assert_eq!(num_chars, 23);
}
#[test]
fn test_count_chars_with() {
let test_input = create_test_input();
let num_chars = count_chars(&test_input);
assert_eq!(num_chars, 36);
}
#[test]
fn test_unescape() {
let test_input = create_test_input();
let unescaped = unescape(&test_input);
assert_eq!(unescaped, "\na\nabc\naaa\"aaa\nbbb\\bbb\n'\n");
assert_eq!(count_chars(&unescaped), 19);
}
#[test]
fn test_escape() {
let test_input = create_test_input();
let escaped = escape(&test_input);
let expected = "\"\\\"\\\"\"\n\
\"\\\"a\\\"\"\n\
\"\\\"abc\\\"\"\n\
\"\\\"aaa\\\\\\\"aaa\\\"\"\n\
\"\\\"bbb\\\\\\\\bbb\\\"\"\n\
\"\\\"\\\\x27\\\"\"\n";
assert_eq!(escaped, expected);
assert_eq!(count_chars(&escaped), 65);
}
#[test]
fn test_part1() {
let test_input = create_test_input();
assert_eq!(solve_part1(&test_input), 17);
}
#[test]
fn test_part1_with_example_input() {
let test_input = example_input();
assert_eq!(solve_part1(&test_input), 12);
}
#[test]
fn test_part2() {
let test_input = create_test_input();
assert_eq!(solve_part2(&test_input), 29);
}
#[test]
fn test_part2_with_example_input() {
let test_input = example_input();
assert_eq!(solve_part2(&test_input), 19);
}
}
|
main.go
|
package stellarbase
import (
"github.com/stellar/go-stellar-base/strkey"
"github.com/stellar/go-stellar-base/xdr"
)
//go:generate rake xdr:update
//go:generate go fmt ./xdr
// One is the value of one whole unit of currency. Stellar uses 7 fixed digits
// for fractional values, thus One is 10 million (10^7)
const One = 10000000
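// toStroops is a hypothetical helper (not part of go-stellar-base) showing how
// One scales a whole-unit amount into the network's integer representation.
func toStroops(units int64) int64 {
	return units * One
}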
// AddressToAccountId converts the provided address into a xdr.AccountId
func AddressToAccountId(address string) (result xdr.AccountId, err error) {
bytes, err := strkey.Decode(strkey.VersionByteAccountID, address)
if err != nil {
return
}
var raw xdr.Uint256
copy(raw[:], bytes)
pk, err := xdr.NewPublicKey(xdr.CryptoKeyTypeKeyTypeEd25519, raw)
if err != nil {
return
}
result = xdr.AccountId(pk)
return
}
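// Illustrative usage (the address is a placeholder, not a real key):
//
//	accountID, err := AddressToAccountId("G...")
//	if err != nil {
//		// handle a malformed address
//	}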
|
api.go
|
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package snowball
import (
"fmt"
"time"
"github.com/aavshr/aws-sdk-go/aws"
"github.com/aavshr/aws-sdk-go/aws/awsutil"
"github.com/aavshr/aws-sdk-go/aws/request"
"github.com/aavshr/aws-sdk-go/private/protocol"
"github.com/aavshr/aws-sdk-go/private/protocol/jsonrpc"
)
const opCancelCluster = "CancelCluster"
// CancelClusterRequest generates a "aws/request.Request" representing the
// client's request for the CancelCluster operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CancelCluster for more information on using the CancelCluster
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CancelClusterRequest method.
// req, resp := client.CancelClusterRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CancelCluster
func (c *Snowball) CancelClusterRequest(input *CancelClusterInput) (req *request.Request, output *CancelClusterOutput) {
op := &request.Operation{
Name: opCancelCluster,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CancelClusterInput{}
}
output = &CancelClusterOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// CancelCluster API operation for Amazon Import/Export Snowball.
//
// Cancels a cluster job. You can only cancel a cluster job while it's in the
// AwaitingQuorum status. You'll have at least an hour after creating a cluster
// job to cancel it.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation CancelCluster for usage and error information.
//
// Returned Error Types:
// * KMSRequestFailedException
// The provided AWS Key Management Service key lacks the permissions to perform
// the specified CreateJob or UpdateJob action.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CancelCluster
func (c *Snowball) CancelCluster(input *CancelClusterInput) (*CancelClusterOutput, error) {
req, out := c.CancelClusterRequest(input)
return out, req.Send()
}
// CancelClusterWithContext is the same as CancelCluster with the addition of
// the ability to pass a context and additional request options.
//
// See CancelCluster for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) CancelClusterWithContext(ctx aws.Context, input *CancelClusterInput, opts ...request.Option) (*CancelClusterOutput, error) {
req, out := c.CancelClusterRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCancelJob = "CancelJob"
// CancelJobRequest generates a "aws/request.Request" representing the
// client's request for the CancelJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CancelJob for more information on using the CancelJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CancelJobRequest method.
// req, resp := client.CancelJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CancelJob
func (c *Snowball) CancelJobRequest(input *CancelJobInput) (req *request.Request, output *CancelJobOutput) {
op := &request.Operation{
Name: opCancelJob,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CancelJobInput{}
}
output = &CancelJobOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// CancelJob API operation for Amazon Import/Export Snowball.
//
// Cancels the specified job. You can only cancel a job before its JobState
// value changes to PreparingAppliance. Requesting the ListJobs or DescribeJob
// action returns a job's JobState as part of the response element data returned.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation CancelJob for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// * KMSRequestFailedException
// The provided AWS Key Management Service key lacks the permissions to perform
// the specified CreateJob or UpdateJob action.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CancelJob
func (c *Snowball) CancelJob(input *CancelJobInput) (*CancelJobOutput, error) {
req, out := c.CancelJobRequest(input)
return out, req.Send()
}
// CancelJobWithContext is the same as CancelJob with the addition of
// the ability to pass a context and additional request options.
//
// See CancelJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) CancelJobWithContext(ctx aws.Context, input *CancelJobInput, opts ...request.Option) (*CancelJobOutput, error) {
req, out := c.CancelJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateAddress = "CreateAddress"
// CreateAddressRequest generates a "aws/request.Request" representing the
// client's request for the CreateAddress operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateAddress for more information on using the CreateAddress
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateAddressRequest method.
// req, resp := client.CreateAddressRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateAddress
func (c *Snowball) CreateAddressRequest(input *CreateAddressInput) (req *request.Request, output *CreateAddressOutput) {
op := &request.Operation{
Name: opCreateAddress,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateAddressInput{}
}
output = &CreateAddressOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateAddress API operation for Amazon Import/Export Snowball.
//
// Creates an address for a Snow device to be shipped to. In most regions, addresses
// are validated at the time of creation. The address you provide must be located
// within the serviceable area of your region. If the address is invalid or
// unsupported, then an exception is thrown.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation CreateAddress for usage and error information.
//
// Returned Error Types:
// * InvalidAddressException
// The address provided was invalid. Check the address with your region's carrier,
// and try again.
//
// * UnsupportedAddressException
// The address is either outside the serviceable area for your region, or an
// error occurred. Check the address with your region's carrier and try again.
// If the issue persists, contact AWS Support.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateAddress
func (c *Snowball) CreateAddress(input *CreateAddressInput) (*CreateAddressOutput, error) {
req, out := c.CreateAddressRequest(input)
return out, req.Send()
}
// CreateAddressWithContext is the same as CreateAddress with the addition of
// the ability to pass a context and additional request options.
//
// See CreateAddress for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) CreateAddressWithContext(ctx aws.Context, input *CreateAddressInput, opts ...request.Option) (*CreateAddressOutput, error) {
req, out := c.CreateAddressRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateCluster = "CreateCluster"
// CreateClusterRequest generates a "aws/request.Request" representing the
// client's request for the CreateCluster operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateCluster for more information on using the CreateCluster
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateClusterRequest method.
// req, resp := client.CreateClusterRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateCluster
func (c *Snowball) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) {
op := &request.Operation{
Name: opCreateCluster,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateClusterInput{}
}
output = &CreateClusterOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateCluster API operation for Amazon Import/Export Snowball.
//
// Creates an empty cluster. Each cluster supports five nodes. You use the CreateJob
// action separately to create the jobs for each of these nodes. The cluster
// does not ship until these five node jobs have been created.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation CreateCluster for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * KMSRequestFailedException
// The provided AWS Key Management Service key lacks the permissions to perform
// the specified CreateJob or UpdateJob action.
//
// * InvalidInputCombinationException
// Job or cluster creation failed. One or more inputs were invalid. Confirm
// that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType,
// and try again.
//
// * Ec2RequestFailedException
// Your IAM user lacks the necessary Amazon EC2 permissions to perform the attempted
// action.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateCluster
func (c *Snowball) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) {
req, out := c.CreateClusterRequest(input)
return out, req.Send()
}
// CreateClusterWithContext is the same as CreateCluster with the addition of
// the ability to pass a context and additional request options.
//
// See CreateCluster for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) CreateClusterWithContext(ctx aws.Context, input *CreateClusterInput, opts ...request.Option) (*CreateClusterOutput, error) {
req, out := c.CreateClusterRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateJob = "CreateJob"
// CreateJobRequest generates a "aws/request.Request" representing the
// client's request for the CreateJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateJob for more information on using the CreateJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateJobRequest method.
// req, resp := client.CreateJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateJob
func (c *Snowball) CreateJobRequest(input *CreateJobInput) (req *request.Request, output *CreateJobOutput) {
op := &request.Operation{
Name: opCreateJob,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateJobInput{}
}
output = &CreateJobOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateJob API operation for Amazon Import/Export Snowball.
//
// Creates a job to import or export data between Amazon S3 and your on-premises
// data center. Your AWS account must have the right trust policies and permissions
// in place to create a job for a Snow device. If you're creating a job for
// a node in a cluster, you only need to provide the clusterId value; the other
// job attributes are inherited from the cluster.
//
// Only the Snowball Edge device type is supported when ordering clustered
// jobs.
//
// The device capacity is optional.
//
// Availability of device types differ by AWS Region. For more information about
// Region availability, see AWS Regional Services (https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/?p=ngi&loc=4).
//
// AWS Snow Family device types and their capacities.
//
// * Snow Family device type: SNC1_SSD Capacity: T14 Description: Snowcone
//
// * Snow Family device type: SNC1_HDD Capacity: T8 Description: Snowcone
//
// * Device type: EDGE_S Capacity: T98 Description: Snowball Edge Storage
// Optimized for data transfer only
//
// * Device type: EDGE_CG Capacity: T42 Description: Snowball Edge Compute
// Optimized with GPU
//
// * Device type: EDGE_C Capacity: T42 Description: Snowball Edge Compute
// Optimized without GPU
//
// * Device type: EDGE Capacity: T100 Description: Snowball Edge Storage
// Optimized with EC2 Compute
//
// * Device type: STANDARD Capacity: T50 Description: Original Snowball device
// This device is only available in the Ningxia, Beijing, and Singapore AWS
// Regions.
//
// * Device type: STANDARD Capacity: T80 Description: Original Snowball device
// This device is only available in the Ningxia, Beijing, and Singapore AWS
// Regions.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation CreateJob for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * KMSRequestFailedException
// The provided AWS Key Management Service key lacks the permissions to perform
// the specified CreateJob or UpdateJob action.
//
// * InvalidInputCombinationException
// Job or cluster creation failed. One or more inputs were invalid. Confirm
// that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType,
// and try again.
//
// * ClusterLimitExceededException
// Job creation failed. Currently, clusters support five nodes. If you have
// fewer than five nodes for your cluster and you have more nodes to create
// for this cluster, try again and create jobs until your cluster has exactly
// five nodes.
//
// * Ec2RequestFailedException
// Your IAM user lacks the necessary Amazon EC2 permissions to perform the attempted
// action.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateJob
func (c *Snowball) CreateJob(input *CreateJobInput) (*CreateJobOutput, error) {
req, out := c.CreateJobRequest(input)
return out, req.Send()
}
// CreateJobWithContext is the same as CreateJob with the addition of
// the ability to pass a context and additional request options.
//
// See CreateJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) CreateJobWithContext(ctx aws.Context, input *CreateJobInput, opts ...request.Option) (*CreateJobOutput, error) {
req, out := c.CreateJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateLongTermPricing = "CreateLongTermPricing"
// CreateLongTermPricingRequest generates a "aws/request.Request" representing the
// client's request for the CreateLongTermPricing operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateLongTermPricing for more information on using the CreateLongTermPricing
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateLongTermPricingRequest method.
// req, resp := client.CreateLongTermPricingRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateLongTermPricing
func (c *Snowball) CreateLongTermPricingRequest(input *CreateLongTermPricingInput) (req *request.Request, output *CreateLongTermPricingOutput) {
op := &request.Operation{
Name: opCreateLongTermPricing,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateLongTermPricingInput{}
}
output = &CreateLongTermPricingOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateLongTermPricing API operation for Amazon Import/Export Snowball.
//
// Creates a job with the long-term usage option for a device. The long-term
// usage is a 1-year or 3-year long-term pricing type for the device. You are
// billed upfront, and AWS provides discounts for long-term pricing.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation CreateLongTermPricing for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateLongTermPricing
func (c *Snowball) CreateLongTermPricing(input *CreateLongTermPricingInput) (*CreateLongTermPricingOutput, error) {
req, out := c.CreateLongTermPricingRequest(input)
return out, req.Send()
}
// CreateLongTermPricingWithContext is the same as CreateLongTermPricing with the addition of
// the ability to pass a context and additional request options.
//
// See CreateLongTermPricing for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) CreateLongTermPricingWithContext(ctx aws.Context, input *CreateLongTermPricingInput, opts ...request.Option) (*CreateLongTermPricingOutput, error) {
req, out := c.CreateLongTermPricingRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
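// A hedged usage sketch, not generated SDK code: it assumes a configured
// Snowball client "svc", and the input/output field names shown here
// (LongTermPricingType, LongTermPricingId) are assumptions, as is the
// pricing-type value.
//
//    out, err := svc.CreateLongTermPricing(&snowball.CreateLongTermPricingInput{
//        LongTermPricingType: aws.String("OneYear"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.StringValue(out.LongTermPricingId))
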
const opCreateReturnShippingLabel = "CreateReturnShippingLabel"
// CreateReturnShippingLabelRequest generates a "aws/request.Request" representing the
// client's request for the CreateReturnShippingLabel operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateReturnShippingLabel for more information on using the CreateReturnShippingLabel
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateReturnShippingLabelRequest method.
// req, resp := client.CreateReturnShippingLabelRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateReturnShippingLabel
func (c *Snowball) CreateReturnShippingLabelRequest(input *CreateReturnShippingLabelInput) (req *request.Request, output *CreateReturnShippingLabelOutput) {
op := &request.Operation{
Name: opCreateReturnShippingLabel,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateReturnShippingLabelInput{}
}
output = &CreateReturnShippingLabelOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateReturnShippingLabel API operation for Amazon Import/Export Snowball.
//
// Creates a shipping label that will be used to return the Snow device to AWS.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation CreateReturnShippingLabel for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// * InvalidInputCombinationException
// Job or cluster creation failed. One or more inputs were invalid. Confirm
// that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType,
// and try again.
//
// * ConflictException
// You get this exception when you call CreateReturnShippingLabel more than
// once when other requests are not completed.
//
// * ReturnShippingLabelAlreadyExistsException
// You get this exception if you call CreateReturnShippingLabel and a valid
// return shipping label already exists. In this case, use DescribeReturnShippingLabel
// to get the URL.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/CreateReturnShippingLabel
func (c *Snowball) CreateReturnShippingLabel(input *CreateReturnShippingLabelInput) (*CreateReturnShippingLabelOutput, error) {
req, out := c.CreateReturnShippingLabelRequest(input)
return out, req.Send()
}
// CreateReturnShippingLabelWithContext is the same as CreateReturnShippingLabel with the addition of
// the ability to pass a context and additional request options.
//
// See CreateReturnShippingLabel for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) CreateReturnShippingLabelWithContext(ctx aws.Context, input *CreateReturnShippingLabelInput, opts ...request.Option) (*CreateReturnShippingLabelOutput, error) {
req, out := c.CreateReturnShippingLabelRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
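// A hedged sketch, not generated SDK code, of the fallback the doc above
// describes: if a valid label already exists, fetch it with
// DescribeReturnShippingLabel instead. "svc" and "jobID" are assumed to be
// defined by the caller; the error-code string mirrors the exception name
// documented above.
//
//    out, err := svc.CreateReturnShippingLabel(&snowball.CreateReturnShippingLabelInput{
//        JobId: aws.String(jobID),
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ReturnShippingLabelAlreadyExistsException" {
//        // A valid label already exists; retrieve it instead.
//        out2, err2 := svc.DescribeReturnShippingLabel(&snowball.DescribeReturnShippingLabelInput{
//            JobId: aws.String(jobID),
//        })
//        fmt.Println(out2, err2)
//    }
//    fmt.Println(out)
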
const opDescribeAddress = "DescribeAddress"
// DescribeAddressRequest generates a "aws/request.Request" representing the
// client's request for the DescribeAddress operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeAddress for more information on using the DescribeAddress
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeAddressRequest method.
// req, resp := client.DescribeAddressRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddress
func (c *Snowball) DescribeAddressRequest(input *DescribeAddressInput) (req *request.Request, output *DescribeAddressOutput) {
op := &request.Operation{
Name: opDescribeAddress,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DescribeAddressInput{}
}
output = &DescribeAddressOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeAddress API operation for Amazon Import/Export Snowball.
//
// Takes an AddressId and returns specific details about that address in the
// form of an Address object.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation DescribeAddress for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddress
func (c *Snowball) DescribeAddress(input *DescribeAddressInput) (*DescribeAddressOutput, error) {
req, out := c.DescribeAddressRequest(input)
return out, req.Send()
}
// DescribeAddressWithContext is the same as DescribeAddress with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeAddress for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) DescribeAddressWithContext(ctx aws.Context, input *DescribeAddressInput, opts ...request.Option) (*DescribeAddressOutput, error) {
req, out := c.DescribeAddressRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
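// A hedged usage sketch, not generated SDK code; "svc" is an assumed
// configured client and the AddressId value is hypothetical.
//
//    out, err := svc.DescribeAddress(&snowball.DescribeAddressInput{
//        AddressId: aws.String("ADID1234ab12-3eec-4eb3-9be6-9374c10eb51b"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(out.Address)
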
const opDescribeAddresses = "DescribeAddresses"
// DescribeAddressesRequest generates a "aws/request.Request" representing the
// client's request for the DescribeAddresses operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeAddresses for more information on using the DescribeAddresses
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeAddressesRequest method.
// req, resp := client.DescribeAddressesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddresses
func (c *Snowball) DescribeAddressesRequest(input *DescribeAddressesInput) (req *request.Request, output *DescribeAddressesOutput) {
op := &request.Operation{
Name: opDescribeAddresses,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeAddressesInput{}
}
output = &DescribeAddressesOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeAddresses API operation for Amazon Import/Export Snowball.
//
// Returns a specified number of Address objects. Calling this API in one of
// the US regions will return addresses from the list of all addresses associated
// with this account in all US regions.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation DescribeAddresses for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidNextTokenException
// The NextToken string was altered unexpectedly, and the operation has stopped.
// Run the operation without changing the NextToken string, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeAddresses
func (c *Snowball) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddressesOutput, error) {
req, out := c.DescribeAddressesRequest(input)
return out, req.Send()
}
// DescribeAddressesWithContext is the same as DescribeAddresses with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeAddresses for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) DescribeAddressesWithContext(ctx aws.Context, input *DescribeAddressesInput, opts ...request.Option) (*DescribeAddressesOutput, error) {
req, out := c.DescribeAddressesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeAddressesPages iterates over the pages of a DescribeAddresses operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeAddresses method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeAddresses operation.
// pageNum := 0
// err := client.DescribeAddressesPages(params,
// func(page *snowball.DescribeAddressesOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Snowball) DescribeAddressesPages(input *DescribeAddressesInput, fn func(*DescribeAddressesOutput, bool) bool) error {
return c.DescribeAddressesPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeAddressesPagesWithContext same as DescribeAddressesPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) DescribeAddressesPagesWithContext(ctx aws.Context, input *DescribeAddressesInput, fn func(*DescribeAddressesOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeAddressesInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeAddressesRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*DescribeAddressesOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
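// A hedged sketch, not generated SDK code, of bounding the whole pagination
// with a deadline via the WithContext variant; it assumes the standard
// context and time packages and a configured client "svc".
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    err := svc.DescribeAddressesPagesWithContext(ctx, &snowball.DescribeAddressesInput{},
//        func(page *snowball.DescribeAddressesOutput, lastPage bool) bool {
//            fmt.Println(len(page.Addresses))
//            return true // continue to the next page
//        })
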
const opDescribeCluster = "DescribeCluster"
// DescribeClusterRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCluster operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeCluster for more information on using the DescribeCluster
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeClusterRequest method.
// req, resp := client.DescribeClusterRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeCluster
func (c *Snowball) DescribeClusterRequest(input *DescribeClusterInput) (req *request.Request, output *DescribeClusterOutput) {
op := &request.Operation{
Name: opDescribeCluster,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DescribeClusterInput{}
}
output = &DescribeClusterOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeCluster API operation for Amazon Import/Export Snowball.
//
// Returns information about a specific cluster including shipping information,
// cluster status, and other important metadata.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation DescribeCluster for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeCluster
func (c *Snowball) DescribeCluster(input *DescribeClusterInput) (*DescribeClusterOutput, error) {
req, out := c.DescribeClusterRequest(input)
return out, req.Send()
}
// DescribeClusterWithContext is the same as DescribeCluster with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCluster for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) DescribeClusterWithContext(ctx aws.Context, input *DescribeClusterInput, opts ...request.Option) (*DescribeClusterOutput, error) {
req, out := c.DescribeClusterRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDescribeJob = "DescribeJob"
// DescribeJobRequest generates a "aws/request.Request" representing the
// client's request for the DescribeJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeJob for more information on using the DescribeJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeJobRequest method.
// req, resp := client.DescribeJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeJob
func (c *Snowball) DescribeJobRequest(input *DescribeJobInput) (req *request.Request, output *DescribeJobOutput) {
op := &request.Operation{
Name: opDescribeJob,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DescribeJobInput{}
}
output = &DescribeJobOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeJob API operation for Amazon Import/Export Snowball.
//
// Returns information about a specific job including shipping information,
// job status, and other important metadata.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation DescribeJob for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeJob
func (c *Snowball) DescribeJob(input *DescribeJobInput) (*DescribeJobOutput, error) {
req, out := c.DescribeJobRequest(input)
return out, req.Send()
}
// DescribeJobWithContext is the same as DescribeJob with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) DescribeJobWithContext(ctx aws.Context, input *DescribeJobInput, opts ...request.Option) (*DescribeJobOutput, error) {
req, out := c.DescribeJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
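// A hedged usage sketch, not generated SDK code; "svc" is an assumed
// configured client and the JobId value is hypothetical.
//
//    out, err := svc.DescribeJob(&snowball.DescribeJobInput{
//        JobId: aws.String("JID123e4567-e89b-12d3-a456-426655440000"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.StringValue(out.JobMetadata.JobState))
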
const opDescribeReturnShippingLabel = "DescribeReturnShippingLabel"
// DescribeReturnShippingLabelRequest generates a "aws/request.Request" representing the
// client's request for the DescribeReturnShippingLabel operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeReturnShippingLabel for more information on using the DescribeReturnShippingLabel
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeReturnShippingLabelRequest method.
// req, resp := client.DescribeReturnShippingLabelRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeReturnShippingLabel
func (c *Snowball) DescribeReturnShippingLabelRequest(input *DescribeReturnShippingLabelInput) (req *request.Request, output *DescribeReturnShippingLabelOutput) {
op := &request.Operation{
Name: opDescribeReturnShippingLabel,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DescribeReturnShippingLabelInput{}
}
output = &DescribeReturnShippingLabelOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeReturnShippingLabel API operation for Amazon Import/Export Snowball.
//
// Returns information on the shipping label of a Snow device that is being
// returned to AWS.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation DescribeReturnShippingLabel for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// * ConflictException
// You get this exception when you call CreateReturnShippingLabel more than
// once when other requests are not completed.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/DescribeReturnShippingLabel
func (c *Snowball) DescribeReturnShippingLabel(input *DescribeReturnShippingLabelInput) (*DescribeReturnShippingLabelOutput, error) {
req, out := c.DescribeReturnShippingLabelRequest(input)
return out, req.Send()
}
// DescribeReturnShippingLabelWithContext is the same as DescribeReturnShippingLabel with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeReturnShippingLabel for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) DescribeReturnShippingLabelWithContext(ctx aws.Context, input *DescribeReturnShippingLabelInput, opts ...request.Option) (*DescribeReturnShippingLabelOutput, error) {
req, out := c.DescribeReturnShippingLabelRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
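// A hedged usage sketch, not generated SDK code; "svc" and "jobID" are
// assumed to be defined by the caller, and the Status output field is an
// assumption.
//
//    out, err := svc.DescribeReturnShippingLabel(&snowball.DescribeReturnShippingLabelInput{
//        JobId: aws.String(jobID),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Status))
//    }
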
const opGetJobManifest = "GetJobManifest"
// GetJobManifestRequest generates a "aws/request.Request" representing the
// client's request for the GetJobManifest operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetJobManifest for more information on using the GetJobManifest
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetJobManifestRequest method.
// req, resp := client.GetJobManifestRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/GetJobManifest
func (c *Snowball) GetJobManifestRequest(input *GetJobManifestInput) (req *request.Request, output *GetJobManifestOutput) {
op := &request.Operation{
Name: opGetJobManifest,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetJobManifestInput{}
}
output = &GetJobManifestOutput{}
req = c.newRequest(op, input, output)
return
}
// GetJobManifest API operation for Amazon Import/Export Snowball.
//
// Returns a link to an Amazon S3 presigned URL for the manifest file associated
// with the specified JobId value. You can access the manifest file for up to
// 60 minutes after this request has been made. To access the manifest file
// after 60 minutes have passed, you'll have to make another call to the GetJobManifest
// action.
//
// The manifest is an encrypted file that you can download after your job enters
// the WithCustomer status. The manifest is decrypted by using the UnlockCode
// value when you pass both values to the Snow device through the Snowball
// client the first time the client is started.
//
// As a best practice, we recommend that you don't save a copy of an UnlockCode
// value in the same location as the manifest file for that job. Saving these
// separately helps prevent unauthorized parties from gaining access to the
// Snow device associated with that job.
//
// The credentials of a given job, including its manifest file and unlock code,
// expire 360 days after the job is created.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation GetJobManifest for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/GetJobManifest
func (c *Snowball) GetJobManifest(input *GetJobManifestInput) (*GetJobManifestOutput, error) {
req, out := c.GetJobManifestRequest(input)
return out, req.Send()
}
// GetJobManifestWithContext is the same as GetJobManifest with the addition of
// the ability to pass a context and additional request options.
//
// See GetJobManifest for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) GetJobManifestWithContext(ctx aws.Context, input *GetJobManifestInput, opts ...request.Option) (*GetJobManifestOutput, error) {
req, out := c.GetJobManifestRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
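// A hedged usage sketch, not generated SDK code: because the presigned URL
// expires 60 minutes after this call, fetch it right before use. "svc" and
// "jobID" are assumed, and ManifestURI is the assumed output field.
//
//    out, err := svc.GetJobManifest(&snowball.GetJobManifestInput{
//        JobId: aws.String(jobID),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.StringValue(out.ManifestURI))
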
const opGetJobUnlockCode = "GetJobUnlockCode"
// GetJobUnlockCodeRequest generates a "aws/request.Request" representing the
// client's request for the GetJobUnlockCode operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetJobUnlockCode for more information on using the GetJobUnlockCode
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetJobUnlockCodeRequest method.
// req, resp := client.GetJobUnlockCodeRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/GetJobUnlockCode
func (c *Snowball) GetJobUnlockCodeRequest(input *GetJobUnlockCodeInput) (req *request.Request, output *GetJobUnlockCodeOutput) {
op := &request.Operation{
Name: opGetJobUnlockCode,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetJobUnlockCodeInput{}
}
output = &GetJobUnlockCodeOutput{}
req = c.newRequest(op, input, output)
return
}
// GetJobUnlockCode API operation for Amazon Import/Export Snowball.
//
// Returns the UnlockCode value for the specified job. A particular UnlockCode
// value can be accessed for up to 360 days after the associated job has been
// created.
//
// The UnlockCode value is a 29-character code with 25 alphanumeric characters
// and 4 hyphens. This code is used to decrypt the manifest file when it is
// passed along with the manifest to the Snow device through the Snowball client
// the first time the client is started.
//
// As a best practice, we recommend that you don't save a copy of the UnlockCode
// in the same location as the manifest file for that job. Saving these separately
// helps prevent unauthorized parties from gaining access to the Snow device
// associated with that job.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation GetJobUnlockCode for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/GetJobUnlockCode
func (c *Snowball) GetJobUnlockCode(input *GetJobUnlockCodeInput) (*GetJobUnlockCodeOutput, error) {
req, out := c.GetJobUnlockCodeRequest(input)
return out, req.Send()
}
// GetJobUnlockCodeWithContext is the same as GetJobUnlockCode with the addition of
// the ability to pass a context and additional request options.
//
// See GetJobUnlockCode for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) GetJobUnlockCodeWithContext(ctx aws.Context, input *GetJobUnlockCodeInput, opts ...request.Option) (*GetJobUnlockCodeOutput, error) {
req, out := c.GetJobUnlockCodeRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
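// A hedged usage sketch, not generated SDK code. Per the guidance above,
// don't persist the UnlockCode next to the manifest file. "svc" and "jobID"
// are assumed to be defined by the caller.
//
//    out, err := svc.GetJobUnlockCode(&snowball.GetJobUnlockCodeInput{
//        JobId: aws.String(jobID),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    unlock := aws.StringValue(out.UnlockCode) // keep separate from the manifest
//    _ = unlock
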
const opGetSnowballUsage = "GetSnowballUsage"
// GetSnowballUsageRequest generates a "aws/request.Request" representing the
// client's request for the GetSnowballUsage operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetSnowballUsage for more information on using the GetSnowballUsage
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetSnowballUsageRequest method.
// req, resp := client.GetSnowballUsageRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/GetSnowballUsage
func (c *Snowball) GetSnowballUsageRequest(input *GetSnowballUsageInput) (req *request.Request, output *GetSnowballUsageOutput) {
op := &request.Operation{
Name: opGetSnowballUsage,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetSnowballUsageInput{}
}
output = &GetSnowballUsageOutput{}
req = c.newRequest(op, input, output)
return
}
// GetSnowballUsage API operation for Amazon Import/Export Snowball.
//
// Returns information about the Snow Family service limit for your account,
// and also the number of Snow devices your account has in use.
//
// The default service limit for the number of Snow devices that you can have
// at one time is 1. If you want to increase your service limit, contact AWS
// Support.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation GetSnowballUsage for usage and error information.
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/GetSnowballUsage
func (c *Snowball) GetSnowballUsage(input *GetSnowballUsageInput) (*GetSnowballUsageOutput, error) {
req, out := c.GetSnowballUsageRequest(input)
return out, req.Send()
}
// GetSnowballUsageWithContext is the same as GetSnowballUsage with the addition of
// the ability to pass a context and additional request options.
//
// See GetSnowballUsage for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) GetSnowballUsageWithContext(ctx aws.Context, input *GetSnowballUsageInput, opts ...request.Option) (*GetSnowballUsageOutput, error) {
req, out := c.GetSnowballUsageRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
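// A hedged usage sketch, not generated SDK code; the SnowballsInUse and
// SnowballLimit output fields are assumptions based on the description above.
//
//    out, err := svc.GetSnowballUsage(&snowball.GetSnowballUsageInput{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Printf("in use: %d of %d\n",
//        aws.Int64Value(out.SnowballsInUse), aws.Int64Value(out.SnowballLimit))
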
const opGetSoftwareUpdates = "GetSoftwareUpdates"
// GetSoftwareUpdatesRequest generates a "aws/request.Request" representing the
// client's request for the GetSoftwareUpdates operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetSoftwareUpdates for more information on using the GetSoftwareUpdates
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetSoftwareUpdatesRequest method.
// req, resp := client.GetSoftwareUpdatesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/GetSoftwareUpdates
func (c *Snowball) GetSoftwareUpdatesRequest(input *GetSoftwareUpdatesInput) (req *request.Request, output *GetSoftwareUpdatesOutput) {
op := &request.Operation{
Name: opGetSoftwareUpdates,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetSoftwareUpdatesInput{}
}
output = &GetSoftwareUpdatesOutput{}
req = c.newRequest(op, input, output)
return
}
// GetSoftwareUpdates API operation for Amazon Import/Export Snowball.
//
// Returns an Amazon S3 presigned URL for an update file associated with a specified
// JobId.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation GetSoftwareUpdates for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/GetSoftwareUpdates
func (c *Snowball) GetSoftwareUpdates(input *GetSoftwareUpdatesInput) (*GetSoftwareUpdatesOutput, error) {
req, out := c.GetSoftwareUpdatesRequest(input)
return out, req.Send()
}
// GetSoftwareUpdatesWithContext is the same as GetSoftwareUpdates with the addition of
// the ability to pass a context and additional request options.
//
// See GetSoftwareUpdates for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) GetSoftwareUpdatesWithContext(ctx aws.Context, input *GetSoftwareUpdatesInput, opts ...request.Option) (*GetSoftwareUpdatesOutput, error) {
req, out := c.GetSoftwareUpdatesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
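// A hedged usage sketch, not generated SDK code; "svc" and "jobID" are
// assumed, and UpdatesURI is the assumed output field carrying the presigned
// URL.
//
//    out, err := svc.GetSoftwareUpdates(&snowball.GetSoftwareUpdatesInput{
//        JobId: aws.String(jobID),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.StringValue(out.UpdatesURI))
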
const opListClusterJobs = "ListClusterJobs"
// ListClusterJobsRequest generates a "aws/request.Request" representing the
// client's request for the ListClusterJobs operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListClusterJobs for more information on using the ListClusterJobs
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListClusterJobsRequest method.
// req, resp := client.ListClusterJobsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusterJobs
func (c *Snowball) ListClusterJobsRequest(input *ListClusterJobsInput) (req *request.Request, output *ListClusterJobsOutput) {
op := &request.Operation{
Name: opListClusterJobs,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ListClusterJobsInput{}
}
output = &ListClusterJobsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListClusterJobs API operation for Amazon Import/Export Snowball.
//
// Returns an array of JobListEntry objects of the specified length. Each JobListEntry
// object is for a job in the specified cluster and contains a job's state,
// a job's ID, and other information.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation ListClusterJobs for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidNextTokenException
// The NextToken string was altered unexpectedly, and the operation has stopped.
// Run the operation without changing the NextToken string, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusterJobs
func (c *Snowball) ListClusterJobs(input *ListClusterJobsInput) (*ListClusterJobsOutput, error) {
req, out := c.ListClusterJobsRequest(input)
return out, req.Send()
}
// ListClusterJobsWithContext is the same as ListClusterJobs with the addition of
// the ability to pass a context and additional request options.
//
// See ListClusterJobs for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) ListClusterJobsWithContext(ctx aws.Context, input *ListClusterJobsInput, opts ...request.Option) (*ListClusterJobsOutput, error) {
req, out := c.ListClusterJobsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
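// A hedged usage sketch, not generated SDK code; "svc" is an assumed
// configured client and the ClusterId value is hypothetical.
//
//    out, err := svc.ListClusterJobs(&snowball.ListClusterJobsInput{
//        ClusterId: aws.String("CID123e4567-e89b-12d3-a456-426655440000"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    for _, j := range out.JobListEntries {
//        fmt.Println(aws.StringValue(j.JobId), aws.StringValue(j.JobState))
//    }
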
const opListClusters = "ListClusters"
// ListClustersRequest generates a "aws/request.Request" representing the
// client's request for the ListClusters operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListClusters for more information on using the ListClusters
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListClustersRequest method.
// req, resp := client.ListClustersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusters
func (c *Snowball) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) {
op := &request.Operation{
Name: opListClusters,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ListClustersInput{}
}
output = &ListClustersOutput{}
req = c.newRequest(op, input, output)
return
}
// ListClusters API operation for Amazon Import/Export Snowball.
//
// Returns an array of ClusterListEntry objects of the specified length. Each
// ClusterListEntry object contains a cluster's state, a cluster's ID, and other
// important status information.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation ListClusters for usage and error information.
//
// Returned Error Types:
// * InvalidNextTokenException
// The NextToken string was altered unexpectedly, and the operation has stopped.
// Run the operation without changing the NextToken string, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListClusters
func (c *Snowball) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) {
req, out := c.ListClustersRequest(input)
return out, req.Send()
}
// ListClustersWithContext is the same as ListClusters with the addition of
// the ability to pass a context and additional request options.
//
// See ListClusters for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, opts ...request.Option) (*ListClustersOutput, error) {
req, out := c.ListClustersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opListCompatibleImages = "ListCompatibleImages"
// ListCompatibleImagesRequest generates a "aws/request.Request" representing the
// client's request for the ListCompatibleImages operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListCompatibleImages for more information on using the ListCompatibleImages
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListCompatibleImagesRequest method.
// req, resp := client.ListCompatibleImagesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListCompatibleImages
func (c *Snowball) ListCompatibleImagesRequest(input *ListCompatibleImagesInput) (req *request.Request, output *ListCompatibleImagesOutput) {
op := &request.Operation{
Name: opListCompatibleImages,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ListCompatibleImagesInput{}
}
output = &ListCompatibleImagesOutput{}
req = c.newRequest(op, input, output)
return
}
// ListCompatibleImages API operation for Amazon Import/Export Snowball.
//
// This action returns a list of the Amazon EC2 Amazon Machine Images (AMIs)
// owned by your AWS account that are supported for use on a Snow device.
// Currently, supported AMIs are based on the CentOS 7 (x86_64)
// - with Updates HVM, Ubuntu Server 14.04 LTS (HVM), and Ubuntu 16.04 LTS -
// Xenial (HVM) images, available on the AWS Marketplace.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation ListCompatibleImages for usage and error information.
//
// Returned Error Types:
// * InvalidNextTokenException
// The NextToken string was altered unexpectedly, and the operation has stopped.
// Run the operation without changing the NextToken string, and try again.
//
// * Ec2RequestFailedException
// Your IAM user lacks the necessary Amazon EC2 permissions to perform the attempted
// action.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListCompatibleImages
func (c *Snowball) ListCompatibleImages(input *ListCompatibleImagesInput) (*ListCompatibleImagesOutput, error) {
req, out := c.ListCompatibleImagesRequest(input)
return out, req.Send()
}
// ListCompatibleImagesWithContext is the same as ListCompatibleImages with the addition of
// the ability to pass a context and additional request options.
//
// See ListCompatibleImages for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) ListCompatibleImagesWithContext(ctx aws.Context, input *ListCompatibleImagesInput, opts ...request.Option) (*ListCompatibleImagesOutput, error) {
req, out := c.ListCompatibleImagesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
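// A hedged sketch, not generated SDK code, that follows NextToken by hand,
// since no Pages helper is generated for this operation. The AmiId and Name
// fields of the returned entries are assumptions.
//
//    in := &snowball.ListCompatibleImagesInput{MaxResults: aws.Int64(50)}
//    for {
//        out, err := svc.ListCompatibleImages(in)
//        if err != nil {
//            log.Fatal(err)
//        }
//        for _, img := range out.CompatibleImages {
//            fmt.Println(aws.StringValue(img.AmiId), aws.StringValue(img.Name))
//        }
//        if out.NextToken == nil {
//            break // no more pages
//        }
//        in.NextToken = out.NextToken
//    }
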
const opListJobs = "ListJobs"
// ListJobsRequest generates a "aws/request.Request" representing the
// client's request for the ListJobs operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListJobs for more information on using the ListJobs
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListJobsRequest method.
// req, resp := client.ListJobsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListJobs
func (c *Snowball) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) {
op := &request.Operation{
Name: opListJobs,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListJobsInput{}
}
output = &ListJobsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListJobs API operation for Amazon Import/Export Snowball.
//
// Returns an array of JobListEntry objects of the specified length. Each JobListEntry
// object contains a job's state, a job's ID, and a value that indicates whether
// the job is a job part, in the case of export jobs. Calling this API action
// in one of the US regions will return jobs from the list of all jobs associated
// with this account in all US regions.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation ListJobs for usage and error information.
//
// Returned Error Types:
// * InvalidNextTokenException
// The NextToken string was altered unexpectedly, and the operation has stopped.
// Run the operation without changing the NextToken string, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListJobs
func (c *Snowball) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) {
req, out := c.ListJobsRequest(input)
return out, req.Send()
}
// ListJobsWithContext is the same as ListJobs with the addition of
// the ability to pass a context and additional request options.
//
// See ListJobs for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) {
req, out := c.ListJobsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListJobsPages iterates over the pages of a ListJobs operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListJobs method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListJobs operation.
// pageNum := 0
// err := client.ListJobsPages(params,
// func(page *snowball.ListJobsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Snowball) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error {
return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListJobsPagesWithContext same as ListJobsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListJobsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListJobsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
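// A hedged sketch, not generated SDK code: the same pagination bounded by a
// caller-supplied deadline through the WithContext variant, assuming the
// standard context and time packages and a configured client "svc".
//
//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//    defer cancel()
//    err := svc.ListJobsPagesWithContext(ctx, &snowball.ListJobsInput{},
//        func(page *snowball.ListJobsOutput, lastPage bool) bool {
//            fmt.Println(len(page.JobListEntries))
//            return !lastPage // stop once the final page is seen
//        })
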
const opListLongTermPricing = "ListLongTermPricing"
// ListLongTermPricingRequest generates a "aws/request.Request" representing the
// client's request for the ListLongTermPricing operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListLongTermPricing for more information on using the ListLongTermPricing
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListLongTermPricingRequest method.
// req, resp := client.ListLongTermPricingRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListLongTermPricing
func (c *Snowball) ListLongTermPricingRequest(input *ListLongTermPricingInput) (req *request.Request, output *ListLongTermPricingOutput) {
op := &request.Operation{
Name: opListLongTermPricing,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ListLongTermPricingInput{}
}
output = &ListLongTermPricingOutput{}
req = c.newRequest(op, input, output)
return
}
// ListLongTermPricing API operation for Amazon Import/Export Snowball.
//
// Lists all long-term pricing types.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation ListLongTermPricing for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidNextTokenException
// The NextToken string was altered unexpectedly, and the operation has stopped.
// Run the operation without changing the NextToken string, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/ListLongTermPricing
func (c *Snowball) ListLongTermPricing(input *ListLongTermPricingInput) (*ListLongTermPricingOutput, error) {
req, out := c.ListLongTermPricingRequest(input)
return out, req.Send()
}
// ListLongTermPricingWithContext is the same as ListLongTermPricing with the addition of
// the ability to pass a context and additional request options.
//
// See ListLongTermPricing for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) ListLongTermPricingWithContext(ctx aws.Context, input *ListLongTermPricingInput, opts ...request.Option) (*ListLongTermPricingOutput, error) {
req, out := c.ListLongTermPricingRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateCluster = "UpdateCluster"
// UpdateClusterRequest generates a "aws/request.Request" representing the
// client's request for the UpdateCluster operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateCluster for more information on using the UpdateCluster
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateClusterRequest method.
// req, resp := client.UpdateClusterRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/UpdateCluster
func (c *Snowball) UpdateClusterRequest(input *UpdateClusterInput) (req *request.Request, output *UpdateClusterOutput) {
op := &request.Operation{
Name: opUpdateCluster,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &UpdateClusterInput{}
}
output = &UpdateClusterOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UpdateCluster API operation for Amazon Import/Export Snowball.
//
// While a cluster's ClusterState value is in the AwaitingQuorum state, you
// can update some of the information associated with a cluster. Once the cluster
// changes to a different job state, usually 60 minutes after the cluster is
// created, this action is no longer available.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation UpdateCluster for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// * KMSRequestFailedException
// The provided AWS Key Management Service key lacks the permissions to perform
// the specified CreateJob or UpdateJob action.
//
// * InvalidInputCombinationException
// Job or cluster creation failed. One or more inputs were invalid. Confirm
// that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType,
// and try again.
//
// * Ec2RequestFailedException
// Your IAM user lacks the necessary Amazon EC2 permissions to perform the attempted
// action.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/UpdateCluster
func (c *Snowball) UpdateCluster(input *UpdateClusterInput) (*UpdateClusterOutput, error) {
req, out := c.UpdateClusterRequest(input)
return out, req.Send()
}
// UpdateClusterWithContext is the same as UpdateCluster with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateCluster for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future, the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) UpdateClusterWithContext(ctx aws.Context, input *UpdateClusterInput, opts ...request.Option) (*UpdateClusterOutput, error) {
req, out := c.UpdateClusterRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
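// Illustrative sketch, not generated code: calling UpdateClusterWithContext so
// the request participates in context cancellation. aws.BackgroundContext()
// stands in for a caller-supplied context; in practice you would usually derive
// one with a deadline. exampleUpdateClusterDescription is a hypothetical helper.
func exampleUpdateClusterDescription(svc *Snowball, clusterID, description string) error {
	input := &UpdateClusterInput{}
	input.SetClusterId(clusterID)
	input.SetDescription(description)
	// The context must be non-nil; a cancelled or expired context aborts the
	// in-flight request and Send returns the context's error.
	_, err := svc.UpdateClusterWithContext(aws.BackgroundContext(), input)
	return err
}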
const opUpdateJob = "UpdateJob"
// UpdateJobRequest generates a "aws/request.Request" representing the
// client's request for the UpdateJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateJob for more information on using the UpdateJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the UpdateJobRequest method.
//    req, resp := client.UpdateJobRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/UpdateJob
func (c *Snowball) UpdateJobRequest(input *UpdateJobInput) (req *request.Request, output *UpdateJobOutput) {
op := &request.Operation{
Name: opUpdateJob,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &UpdateJobInput{}
}
output = &UpdateJobOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UpdateJob API operation for Amazon Import/Export Snowball.
//
// While a job's JobState value is New, you can update some of the information
// associated with a job. Once the job changes to a different job state, usually
// within 60 minutes of the job being created, this action is no longer available.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation UpdateJob for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// * KMSRequestFailedException
// The provided AWS Key Management Service key lacks the permissions to perform
// the specified CreateJob or UpdateJob action.
//
// * InvalidInputCombinationException
// Job or cluster creation failed. One or more inputs were invalid. Confirm
// that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType,
// and try again.
//
// * ClusterLimitExceededException
// Job creation failed. Currently, clusters support five nodes. If you have
// fewer than five nodes for your cluster and you have more nodes to create
// for this cluster, try again and create jobs until your cluster has exactly
// five nodes.
//
// * Ec2RequestFailedException
// Your IAM user lacks the necessary Amazon EC2 permissions to perform the attempted
// action.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/UpdateJob
func (c *Snowball) UpdateJob(input *UpdateJobInput) (*UpdateJobOutput, error) {
req, out := c.UpdateJobRequest(input)
return out, req.Send()
}
// UpdateJobWithContext is the same as UpdateJob with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future, the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) UpdateJobWithContext(ctx aws.Context, input *UpdateJobInput, opts ...request.Option) (*UpdateJobOutput, error) {
req, out := c.UpdateJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateJobShipmentState = "UpdateJobShipmentState"
// UpdateJobShipmentStateRequest generates a "aws/request.Request" representing the
// client's request for the UpdateJobShipmentState operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateJobShipmentState for more information on using the UpdateJobShipmentState
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the UpdateJobShipmentStateRequest method.
//    req, resp := client.UpdateJobShipmentStateRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/UpdateJobShipmentState
func (c *Snowball) UpdateJobShipmentStateRequest(input *UpdateJobShipmentStateInput) (req *request.Request, output *UpdateJobShipmentStateOutput) {
op := &request.Operation{
Name: opUpdateJobShipmentState,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &UpdateJobShipmentStateInput{}
}
output = &UpdateJobShipmentStateOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UpdateJobShipmentState API operation for Amazon Import/Export Snowball.
//
// Updates the state of a job's shipment when the shipment changes to a different state.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation UpdateJobShipmentState for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// * InvalidJobStateException
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/UpdateJobShipmentState
func (c *Snowball) UpdateJobShipmentState(input *UpdateJobShipmentStateInput) (*UpdateJobShipmentStateOutput, error) {
req, out := c.UpdateJobShipmentStateRequest(input)
return out, req.Send()
}
// UpdateJobShipmentStateWithContext is the same as UpdateJobShipmentState with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateJobShipmentState for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future, the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) UpdateJobShipmentStateWithContext(ctx aws.Context, input *UpdateJobShipmentStateInput, opts ...request.Option) (*UpdateJobShipmentStateOutput, error) {
req, out := c.UpdateJobShipmentStateRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateLongTermPricing = "UpdateLongTermPricing"
// UpdateLongTermPricingRequest generates a "aws/request.Request" representing the
// client's request for the UpdateLongTermPricing operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateLongTermPricing for more information on using the UpdateLongTermPricing
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the UpdateLongTermPricingRequest method.
//    req, resp := client.UpdateLongTermPricingRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/UpdateLongTermPricing
func (c *Snowball) UpdateLongTermPricingRequest(input *UpdateLongTermPricingInput) (req *request.Request, output *UpdateLongTermPricingOutput) {
op := &request.Operation{
Name: opUpdateLongTermPricing,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &UpdateLongTermPricingInput{}
}
output = &UpdateLongTermPricingOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UpdateLongTermPricing API operation for Amazon Import/Export Snowball.
//
// Updates the long-term pricing type.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Import/Export Snowball's
// API operation UpdateLongTermPricing for usage and error information.
//
// Returned Error Types:
// * InvalidResourceException
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/snowball-2016-06-30/UpdateLongTermPricing
func (c *Snowball) UpdateLongTermPricing(input *UpdateLongTermPricingInput) (*UpdateLongTermPricingOutput, error) {
req, out := c.UpdateLongTermPricingRequest(input)
return out, req.Send()
}
// UpdateLongTermPricingWithContext is the same as UpdateLongTermPricing with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateLongTermPricing for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future, the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Snowball) UpdateLongTermPricingWithContext(ctx aws.Context, input *UpdateLongTermPricingInput, opts ...request.Option) (*UpdateLongTermPricingOutput, error) {
req, out := c.UpdateLongTermPricingRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// The address that you want the Snow device(s) associated with a specific job
// to be shipped to. Addresses are validated at the time of creation. The address
// you provide must be located within the serviceable area of your region. Although
// no individual elements of the Address are required, if the address is invalid
// or unsupported, then an exception is thrown.
type Address struct {
_ struct{} `type:"structure"`
// The unique ID for an address.
AddressId *string `min:"40" type:"string"`
// The city in an address that a Snow device is to be delivered to.
City *string `min:"1" type:"string"`
// The name of the company to receive a Snow device at an address.
Company *string `min:"1" type:"string"`
// The country in an address that a Snow device is to be delivered to.
Country *string `min:"1" type:"string"`
// If the address you are creating is a primary address, then set this option
// to true. This field is not supported in most regions.
IsRestricted *bool `type:"boolean"`
// This field is no longer used and the value is ignored.
Landmark *string `min:"1" type:"string"`
// The name of a person to receive a Snow device at an address.
Name *string `min:"1" type:"string"`
// The phone number associated with an address that a Snow device is to be delivered
// to.
PhoneNumber *string `min:"1" type:"string"`
// The postal code in an address that a Snow device is to be delivered to.
PostalCode *string `min:"1" type:"string"`
// This field is no longer used and the value is ignored.
PrefectureOrDistrict *string `min:"1" type:"string"`
// The state or province in an address that a Snow device is to be delivered
// to.
StateOrProvince *string `min:"1" type:"string"`
// The first line in a street address that a Snow device is to be delivered
// to.
Street1 *string `min:"1" type:"string"`
// The second line in a street address that a Snow device is to be delivered
// to.
Street2 *string `min:"1" type:"string"`
// The third line in a street address that a Snow device is to be delivered
// to.
Street3 *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Address) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Address) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Address) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Address"}
if s.AddressId != nil && len(*s.AddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("AddressId", 40))
}
if s.City != nil && len(*s.City) < 1 {
invalidParams.Add(request.NewErrParamMinLen("City", 1))
}
if s.Company != nil && len(*s.Company) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Company", 1))
}
if s.Country != nil && len(*s.Country) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Country", 1))
}
if s.Landmark != nil && len(*s.Landmark) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Landmark", 1))
}
if s.Name != nil && len(*s.Name) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Name", 1))
}
if s.PhoneNumber != nil && len(*s.PhoneNumber) < 1 {
invalidParams.Add(request.NewErrParamMinLen("PhoneNumber", 1))
}
if s.PostalCode != nil && len(*s.PostalCode) < 1 {
invalidParams.Add(request.NewErrParamMinLen("PostalCode", 1))
}
if s.PrefectureOrDistrict != nil && len(*s.PrefectureOrDistrict) < 1 {
invalidParams.Add(request.NewErrParamMinLen("PrefectureOrDistrict", 1))
}
if s.StateOrProvince != nil && len(*s.StateOrProvince) < 1 {
invalidParams.Add(request.NewErrParamMinLen("StateOrProvince", 1))
}
if s.Street1 != nil && len(*s.Street1) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Street1", 1))
}
if s.Street2 != nil && len(*s.Street2) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Street2", 1))
}
if s.Street3 != nil && len(*s.Street3) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Street3", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAddressId sets the AddressId field's value.
func (s *Address) SetAddressId(v string) *Address {
s.AddressId = &v
return s
}
// SetCity sets the City field's value.
func (s *Address) SetCity(v string) *Address {
s.City = &v
return s
}
// SetCompany sets the Company field's value.
func (s *Address) SetCompany(v string) *Address {
s.Company = &v
return s
}
// SetCountry sets the Country field's value.
func (s *Address) SetCountry(v string) *Address {
s.Country = &v
return s
}
// SetIsRestricted sets the IsRestricted field's value.
func (s *Address) SetIsRestricted(v bool) *Address {
s.IsRestricted = &v
return s
}
// SetLandmark sets the Landmark field's value.
func (s *Address) SetLandmark(v string) *Address {
s.Landmark = &v
return s
}
// SetName sets the Name field's value.
func (s *Address) SetName(v string) *Address {
s.Name = &v
return s
}
// SetPhoneNumber sets the PhoneNumber field's value.
func (s *Address) SetPhoneNumber(v string) *Address {
s.PhoneNumber = &v
return s
}
// SetPostalCode sets the PostalCode field's value.
func (s *Address) SetPostalCode(v string) *Address {
s.PostalCode = &v
return s
}
// SetPrefectureOrDistrict sets the PrefectureOrDistrict field's value.
func (s *Address) SetPrefectureOrDistrict(v string) *Address {
s.PrefectureOrDistrict = &v
return s
}
// SetStateOrProvince sets the StateOrProvince field's value.
func (s *Address) SetStateOrProvince(v string) *Address {
s.StateOrProvince = &v
return s
}
// SetStreet1 sets the Street1 field's value.
func (s *Address) SetStreet1(v string) *Address {
s.Street1 = &v
return s
}
// SetStreet2 sets the Street2 field's value.
func (s *Address) SetStreet2(v string) *Address {
s.Street2 = &v
return s
}
// SetStreet3 sets the Street3 field's value.
func (s *Address) SetStreet3(v string) *Address {
s.Street3 = &v
return s
}
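// Illustrative sketch, not generated code: building an Address with the fluent
// setters above and checking it with Validate before use. All field values are
// placeholders, and exampleBuildAddress is a hypothetical helper name.
func exampleBuildAddress() (*Address, error) {
	addr := &Address{}
	addr.SetName("Jane Doe").
		SetStreet1("123 Any Street").
		SetCity("Seattle").
		SetStateOrProvince("WA").
		SetPostalCode("98101").
		SetCountry("US").
		SetPhoneNumber("206-555-0100")
	// Validate enforces only the client-side minimum-length constraints; the
	// service still checks the address for serviceability at creation time.
	if err := addr.Validate(); err != nil {
		return nil, err
	}
	return addr, nil
}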
type CancelClusterInput struct {
_ struct{} `type:"structure"`
// The 39-character ID for the cluster that you want to cancel, for example
// CID123e4567-e89b-12d3-a456-426655440000.
//
// ClusterId is a required field
ClusterId *string `min:"39" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CancelClusterInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CancelClusterInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CancelClusterInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CancelClusterInput"}
if s.ClusterId == nil {
invalidParams.Add(request.NewErrParamRequired("ClusterId"))
}
if s.ClusterId != nil && len(*s.ClusterId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("ClusterId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetClusterId sets the ClusterId field's value.
func (s *CancelClusterInput) SetClusterId(v string) *CancelClusterInput {
s.ClusterId = &v
return s
}
type CancelClusterOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CancelClusterOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CancelClusterOutput) GoString() string {
return s.String()
}
type CancelJobInput struct {
_ struct{} `type:"structure"`
// The 39-character job ID for the job that you want to cancel, for example
// JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CancelJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CancelJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CancelJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CancelJobInput"}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetJobId sets the JobId field's value.
func (s *CancelJobInput) SetJobId(v string) *CancelJobInput {
s.JobId = &v
return s
}
type CancelJobOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CancelJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CancelJobOutput) GoString() string {
return s.String()
}
// Job creation failed. Currently, clusters support five nodes. If you have
// fewer than five nodes for your cluster and you have more nodes to create
// for this cluster, try again and create jobs until your cluster has exactly
// five nodes.
type ClusterLimitExceededException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ClusterLimitExceededException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ClusterLimitExceededException) GoString() string {
return s.String()
}
func newErrorClusterLimitExceededException(v protocol.ResponseMetadata) error {
return &ClusterLimitExceededException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *ClusterLimitExceededException) Code() string {
return "ClusterLimitExceededException"
}
// Message returns the exception's message.
func (s *ClusterLimitExceededException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ClusterLimitExceededException) OrigErr() error {
return nil
}
func (s *ClusterLimitExceededException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *ClusterLimitExceededException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *ClusterLimitExceededException) RequestID() string {
return s.RespMetadata.RequestID
}
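// Illustrative sketch, not generated code: the operation docs above recommend
// runtime type assertions on returned errors. Modeled exceptions such as
// ClusterLimitExceededException satisfy awserr.Error, so a plain type assertion
// recovers the code, message, and HTTP status. exampleHandleClusterLimit is a
// hypothetical helper.
func exampleHandleClusterLimit(svc *Snowball, input *CreateJobInput) {
	if _, err := svc.CreateJob(input); err != nil {
		if limitErr, ok := err.(*ClusterLimitExceededException); ok {
			fmt.Printf("%s (HTTP %d): %s\n", limitErr.Code(), limitErr.StatusCode(), limitErr.Message())
			return
		}
		fmt.Println("unhandled error:", err)
	}
}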
// Contains a cluster's state, a cluster's ID, and other important information.
type ClusterListEntry struct {
_ struct{} `type:"structure"`
// The 39-character ID for the cluster that you want to list, for example CID123e4567-e89b-12d3-a456-426655440000.
ClusterId *string `min:"1" type:"string"`
// The current state of this cluster. For information about the state of a specific
// node, see JobListEntry$JobState.
ClusterState *string `type:"string" enum:"ClusterState"`
// The creation date for this cluster.
CreationDate *time.Time `type:"timestamp"`
// Defines an optional description of the cluster, for example Environmental
// Data Cluster-01.
Description *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ClusterListEntry) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ClusterListEntry) GoString() string {
return s.String()
}
// SetClusterId sets the ClusterId field's value.
func (s *ClusterListEntry) SetClusterId(v string) *ClusterListEntry {
s.ClusterId = &v
return s
}
// SetClusterState sets the ClusterState field's value.
func (s *ClusterListEntry) SetClusterState(v string) *ClusterListEntry {
s.ClusterState = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *ClusterListEntry) SetCreationDate(v time.Time) *ClusterListEntry {
s.CreationDate = &v
return s
}
// SetDescription sets the Description field's value.
func (s *ClusterListEntry) SetDescription(v string) *ClusterListEntry {
s.Description = &v
return s
}
// Contains metadata about a specific cluster.
type ClusterMetadata struct {
_ struct{} `type:"structure"`
// The automatically generated ID for a specific address.
AddressId *string `min:"40" type:"string"`
// The automatically generated ID for a cluster.
ClusterId *string `min:"1" type:"string"`
// The current status of the cluster.
ClusterState *string `type:"string" enum:"ClusterState"`
// The creation date for this cluster.
CreationDate *time.Time `type:"timestamp"`
// The optional description of the cluster.
Description *string `min:"1" type:"string"`
// The ID of the address that you want the cluster shipped to after it has
// been shipped to its primary address. This field is not supported in most
// regions.
ForwardingAddressId *string `min:"40" type:"string"`
// The type of job for this cluster. Currently, the only job type supported
// for clusters is LOCAL_USE.
JobType *string `type:"string" enum:"JobType"`
// The KmsKeyARN Amazon Resource Name (ARN) associated with this cluster. This
// ARN was created using the CreateKey (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html)
// API action in AWS Key Management Service (AWS KMS).
KmsKeyARN *string `type:"string"`
// The Amazon Simple Notification Service (Amazon SNS) notification settings
// for this cluster.
Notification *Notification `type:"structure"`
// Represents metadata and configuration settings for services on an AWS Snow
// Family device.
OnDeviceServiceConfiguration *OnDeviceServiceConfiguration `type:"structure"`
// The arrays of JobResource objects that can include updated S3Resource objects
// or LambdaResource objects.
Resources *JobResource `type:"structure"`
// The role ARN associated with this cluster. This ARN was created using the
// CreateRole (https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html)
// API action in AWS Identity and Access Management (IAM).
RoleARN *string `type:"string"`
// The shipping speed for each node in this cluster. This speed doesn't dictate
// how soon you'll get each device; rather, it represents how quickly each device
// moves to its destination while in transit. Regional shipping speeds are as
// follows:
//
// * In Australia, you have access to express shipping. Typically, devices
// shipped express are delivered in about a day.
//
// * In the European Union (EU), you have access to express shipping. Typically,
// Snow devices shipped express are delivered in about a day. In addition,
// most countries in the EU have access to standard shipping, which typically
// takes less than a week, one way.
//
// * In India, Snow devices are delivered in one to seven days.
//
// * In the US, you have access to one-day shipping and two-day shipping.
ShippingOption *string `type:"string" enum:"ShippingOption"`
// The type of AWS Snow device to use for this cluster.
//
// For cluster jobs, AWS Snow Family currently supports only the EDGE device
// type.
SnowballType *string `type:"string" enum:"Type"`
// The tax documents required in your AWS Region.
TaxDocuments *TaxDocuments `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ClusterMetadata) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ClusterMetadata) GoString() string {
return s.String()
}
// SetAddressId sets the AddressId field's value.
func (s *ClusterMetadata) SetAddressId(v string) *ClusterMetadata {
s.AddressId = &v
return s
}
// SetClusterId sets the ClusterId field's value.
func (s *ClusterMetadata) SetClusterId(v string) *ClusterMetadata {
s.ClusterId = &v
return s
}
// SetClusterState sets the ClusterState field's value.
func (s *ClusterMetadata) SetClusterState(v string) *ClusterMetadata {
s.ClusterState = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *ClusterMetadata) SetCreationDate(v time.Time) *ClusterMetadata {
s.CreationDate = &v
return s
}
// SetDescription sets the Description field's value.
func (s *ClusterMetadata) SetDescription(v string) *ClusterMetadata {
s.Description = &v
return s
}
// SetForwardingAddressId sets the ForwardingAddressId field's value.
func (s *ClusterMetadata) SetForwardingAddressId(v string) *ClusterMetadata {
s.ForwardingAddressId = &v
return s
}
// SetJobType sets the JobType field's value.
func (s *ClusterMetadata) SetJobType(v string) *ClusterMetadata {
s.JobType = &v
return s
}
// SetKmsKeyARN sets the KmsKeyARN field's value.
func (s *ClusterMetadata) SetKmsKeyARN(v string) *ClusterMetadata {
s.KmsKeyARN = &v
return s
}
// SetNotification sets the Notification field's value.
func (s *ClusterMetadata) SetNotification(v *Notification) *ClusterMetadata {
s.Notification = v
return s
}
// SetOnDeviceServiceConfiguration sets the OnDeviceServiceConfiguration field's value.
func (s *ClusterMetadata) SetOnDeviceServiceConfiguration(v *OnDeviceServiceConfiguration) *ClusterMetadata {
s.OnDeviceServiceConfiguration = v
return s
}
// SetResources sets the Resources field's value.
func (s *ClusterMetadata) SetResources(v *JobResource) *ClusterMetadata {
s.Resources = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *ClusterMetadata) SetRoleARN(v string) *ClusterMetadata {
s.RoleARN = &v
return s
}
// SetShippingOption sets the ShippingOption field's value.
func (s *ClusterMetadata) SetShippingOption(v string) *ClusterMetadata {
s.ShippingOption = &v
return s
}
// SetSnowballType sets the SnowballType field's value.
func (s *ClusterMetadata) SetSnowballType(v string) *ClusterMetadata {
s.SnowballType = &v
return s
}
// SetTaxDocuments sets the TaxDocuments field's value.
func (s *ClusterMetadata) SetTaxDocuments(v *TaxDocuments) *ClusterMetadata {
s.TaxDocuments = v
return s
}
// A JSON-formatted object that describes a compatible Amazon Machine Image
// (AMI), including the ID and name for a Snow device AMI. This AMI is compatible
// with the device's physical hardware requirements, and it should run in an
// SBE1 instance on the device.
type CompatibleImage struct {
_ struct{} `type:"structure"`
// The unique identifier for an individual Snow device AMI.
AmiId *string `min:"1" type:"string"`
// The optional name of a compatible image.
Name *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CompatibleImage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CompatibleImage) GoString() string {
return s.String()
}
// SetAmiId sets the AmiId field's value.
func (s *CompatibleImage) SetAmiId(v string) *CompatibleImage {
s.AmiId = &v
return s
}
// SetName sets the Name field's value.
func (s *CompatibleImage) SetName(v string) *CompatibleImage {
s.Name = &v
return s
}
// You get this exception when you call CreateReturnShippingLabel more than
// once while other requests are not yet completed.
type ConflictException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
ConflictResource *string `min:"1" type:"string"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ConflictException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ConflictException) GoString() string {
return s.String()
}
func newErrorConflictException(v protocol.ResponseMetadata) error {
return &ConflictException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *ConflictException) Code() string {
return "ConflictException"
}
// Message returns the exception's message.
func (s *ConflictException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ConflictException) OrigErr() error {
return nil
}
func (s *ConflictException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *ConflictException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *ConflictException) RequestID() string {
return s.RespMetadata.RequestID
}
type CreateAddressInput struct {
_ struct{} `type:"structure"`
// The address that you want the Snow device shipped to.
//
// Address is a required field
Address *Address `type:"structure" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateAddressInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateAddressInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateAddressInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateAddressInput"}
if s.Address == nil {
invalidParams.Add(request.NewErrParamRequired("Address"))
}
if s.Address != nil {
if err := s.Address.Validate(); err != nil {
invalidParams.AddNested("Address", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAddress sets the Address field's value.
func (s *CreateAddressInput) SetAddress(v *Address) *CreateAddressInput {
s.Address = v
return s
}
type CreateAddressOutput struct {
_ struct{} `type:"structure"`
// The automatically generated ID for a specific address. You'll use this ID
// when you create a job to specify which address you want the Snow device for
// that job shipped to.
AddressId *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateAddressOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateAddressOutput) GoString() string {
return s.String()
}
// SetAddressId sets the AddressId field's value.
func (s *CreateAddressOutput) SetAddressId(v string) *CreateAddressOutput {
s.AddressId = &v
return s
}
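// Illustrative sketch, not generated code: the AddressId returned by CreateAddress
// feeds directly into a subsequent CreateJob call, as the field documentation
// above describes. Job attributes other than the address and job type are omitted
// for brevity; exampleCreateAddressForJob is a hypothetical helper.
func exampleCreateAddressForJob(svc *Snowball, addr *Address) (*CreateJobOutput, error) {
	addrOut, err := svc.CreateAddress(&CreateAddressInput{Address: addr})
	if err != nil {
		return nil, err
	}
	jobInput := &CreateJobInput{}
	// The returned ID identifies where the Snow device for this job is shipped.
	jobInput.SetAddressId(aws.StringValue(addrOut.AddressId))
	jobInput.SetJobType("IMPORT") // one of the documented JobType enum values
	return svc.CreateJob(jobInput)
}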
type CreateClusterInput struct {
_ struct{} `type:"structure"`
// The ID for the address that you want the cluster shipped to.
//
// AddressId is a required field
AddressId *string `min:"40" type:"string" required:"true"`
// An optional description of this specific cluster, for example Environmental
// Data Cluster-01.
Description *string `min:"1" type:"string"`
// The forwarding address ID for a cluster. This field is not supported in most
// regions.
ForwardingAddressId *string `min:"40" type:"string"`
// The type of job for this cluster. Currently, the only job type supported
// for clusters is LOCAL_USE.
//
// For more information, see "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowcone User Guide or "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowball Edge Developer Guide.
//
// JobType is a required field
JobType *string `type:"string" required:"true" enum:"JobType"`
// The KmsKeyARN value that you want to associate with this cluster. KmsKeyARN
// values are created by using the CreateKey (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html)
// API action in AWS Key Management Service (AWS KMS).
KmsKeyARN *string `type:"string"`
// The Amazon Simple Notification Service (Amazon SNS) notification settings
// for this cluster.
Notification *Notification `type:"structure"`
// Specifies the service or services on the Snow Family device that your transferred
// data will be exported from or imported into. AWS Snow Family supports Amazon
// S3 and NFS (Network File System).
OnDeviceServiceConfiguration *OnDeviceServiceConfiguration `type:"structure"`
// Allows you to securely operate and manage Snow devices in a cluster remotely
// from outside of your internal network. When set to INSTALLED_AUTOSTART, remote
// management will automatically be available when the device arrives at your
// location. Otherwise, you need to use the Snowball Client to manage the device.
RemoteManagement *string `type:"string" enum:"RemoteManagement"`
// The resources associated with the cluster job. These resources include Amazon
// S3 buckets and optional AWS Lambda functions written in the Python language.
//
// Resources is a required field
Resources *JobResource `type:"structure" required:"true"`
// The RoleARN that you want to associate with this cluster. RoleArn values
// are created by using the CreateRole (https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html)
// API action in AWS Identity and Access Management (IAM).
//
// RoleARN is a required field
RoleARN *string `type:"string" required:"true"`
// The shipping speed for each node in this cluster. This speed doesn't dictate
// how soon you'll get each Snowball Edge device; rather, it represents how quickly
// each device moves to its destination while in transit. Regional shipping
// speeds are as follows:
//
// * In Australia, you have access to express shipping. Typically, Snow devices
// shipped express are delivered in about a day.
//
// * In the European Union (EU), you have access to express shipping. Typically,
// Snow devices shipped express are delivered in about a day. In addition,
// most countries in the EU have access to standard shipping, which typically
// takes less than a week, one way.
//
// * In India, Snow devices are delivered in one to seven days.
//
// * In the United States of America (US), you have access to one-day shipping
// and two-day shipping.
//
// ShippingOption is a required field
ShippingOption *string `type:"string" required:"true" enum:"ShippingOption"`
// The type of AWS Snow Family device to use for this cluster.
//
// For cluster jobs, AWS Snow Family currently supports only the EDGE device
// type.
//
// For more information, see "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowcone User Guide or "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowball Edge Developer Guide.
//
// SnowballType is a required field
SnowballType *string `type:"string" required:"true" enum:"Type"`
// The tax documents required in your AWS Region.
TaxDocuments *TaxDocuments `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateClusterInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateClusterInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateClusterInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateClusterInput"}
if s.AddressId == nil {
invalidParams.Add(request.NewErrParamRequired("AddressId"))
}
if s.AddressId != nil && len(*s.AddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("AddressId", 40))
}
if s.Description != nil && len(*s.Description) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Description", 1))
}
if s.ForwardingAddressId != nil && len(*s.ForwardingAddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("ForwardingAddressId", 40))
}
if s.JobType == nil {
invalidParams.Add(request.NewErrParamRequired("JobType"))
}
if s.Resources == nil {
invalidParams.Add(request.NewErrParamRequired("Resources"))
}
if s.RoleARN == nil {
invalidParams.Add(request.NewErrParamRequired("RoleARN"))
}
if s.ShippingOption == nil {
invalidParams.Add(request.NewErrParamRequired("ShippingOption"))
}
if s.SnowballType == nil {
invalidParams.Add(request.NewErrParamRequired("SnowballType"))
}
if s.Resources != nil {
if err := s.Resources.Validate(); err != nil {
invalidParams.AddNested("Resources", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAddressId sets the AddressId field's value.
func (s *CreateClusterInput) SetAddressId(v string) *CreateClusterInput {
s.AddressId = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CreateClusterInput) SetDescription(v string) *CreateClusterInput {
s.Description = &v
return s
}
// SetForwardingAddressId sets the ForwardingAddressId field's value.
func (s *CreateClusterInput) SetForwardingAddressId(v string) *CreateClusterInput {
s.ForwardingAddressId = &v
return s
}
// SetJobType sets the JobType field's value.
func (s *CreateClusterInput) SetJobType(v string) *CreateClusterInput {
s.JobType = &v
return s
}
// SetKmsKeyARN sets the KmsKeyARN field's value.
func (s *CreateClusterInput) SetKmsKeyARN(v string) *CreateClusterInput {
s.KmsKeyARN = &v
return s
}
// SetNotification sets the Notification field's value.
func (s *CreateClusterInput) SetNotification(v *Notification) *CreateClusterInput {
s.Notification = v
return s
}
// SetOnDeviceServiceConfiguration sets the OnDeviceServiceConfiguration field's value.
func (s *CreateClusterInput) SetOnDeviceServiceConfiguration(v *OnDeviceServiceConfiguration) *CreateClusterInput {
s.OnDeviceServiceConfiguration = v
return s
}
// SetRemoteManagement sets the RemoteManagement field's value.
func (s *CreateClusterInput) SetRemoteManagement(v string) *CreateClusterInput {
s.RemoteManagement = &v
return s
}
// SetResources sets the Resources field's value.
func (s *CreateClusterInput) SetResources(v *JobResource) *CreateClusterInput {
s.Resources = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *CreateClusterInput) SetRoleARN(v string) *CreateClusterInput {
s.RoleARN = &v
return s
}
// SetShippingOption sets the ShippingOption field's value.
func (s *CreateClusterInput) SetShippingOption(v string) *CreateClusterInput {
s.ShippingOption = &v
return s
}
// SetSnowballType sets the SnowballType field's value.
func (s *CreateClusterInput) SetSnowballType(v string) *CreateClusterInput {
s.SnowballType = &v
return s
}
// SetTaxDocuments sets the TaxDocuments field's value.
func (s *CreateClusterInput) SetTaxDocuments(v *TaxDocuments) *CreateClusterInput {
s.TaxDocuments = v
return s
}
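// Illustrative sketch, not generated code: assembling a minimal CreateClusterInput
// with the fluent setters above and validating it client-side. The enum string
// literals mirror the documented values (LOCAL_USE is the only cluster job type,
// EDGE the only cluster device type); the shipping option, ARN, and address ID
// are placeholder assumptions. exampleBuildCreateClusterInput is hypothetical.
func exampleBuildCreateClusterInput(addressID, roleARN string, resources *JobResource) (*CreateClusterInput, error) {
	input := &CreateClusterInput{}
	input.SetAddressId(addressID).
		SetJobType("LOCAL_USE").
		SetSnowballType("EDGE").
		SetShippingOption("NEXT_DAY").
		SetRoleARN(roleARN).
		SetResources(resources)
	// Validate reports all missing required fields and length violations at
	// once, before any request is sent.
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}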
type CreateClusterOutput struct {
_ struct{} `type:"structure"`
// The automatically generated ID for a cluster.
ClusterId *string `min:"39" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateClusterOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateClusterOutput) GoString() string {
return s.String()
}
// SetClusterId sets the ClusterId field's value.
func (s *CreateClusterOutput) SetClusterId(v string) *CreateClusterOutput {
s.ClusterId = &v
return s
}
type CreateJobInput struct {
_ struct{} `type:"structure"`
// The ID for the address that you want the Snow device shipped to.
AddressId *string `min:"40" type:"string"`
// The ID of a cluster. If you're creating a job for a node in a cluster, you
// need to provide only this clusterId value. The other job attributes are inherited
// from the cluster.
ClusterId *string `min:"39" type:"string"`
// Defines an optional description of this specific job, for example Important
// Photos 2016-08-11.
Description *string `min:"1" type:"string"`
// Defines the device configuration for an AWS Snowcone job.
//
// For more information, see "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowcone User Guide or "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowball Edge Developer Guide.
DeviceConfiguration *DeviceConfiguration `type:"structure"`
// The forwarding address ID for a job. This field is not supported in most
// Regions.
ForwardingAddressId *string `min:"40" type:"string"`
// Defines the type of job that you're creating.
JobType *string `type:"string" enum:"JobType"`
// The KmsKeyARN that you want to associate with this job. KmsKeyARNs are created
// using the CreateKey (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html)
// AWS Key Management Service (KMS) API action.
KmsKeyARN *string `type:"string"`
// The ID of the long-term pricing type for the device.
LongTermPricingId *string `min:"41" type:"string"`
// Defines the Amazon Simple Notification Service (Amazon SNS) notification
// settings for this job.
Notification *Notification `type:"structure"`
// Specifies the service or services on the Snow Family device that your transferred
// data will be exported from or imported into. AWS Snow Family supports Amazon
// S3 and NFS (Network File System).
OnDeviceServiceConfiguration *OnDeviceServiceConfiguration `type:"structure"`
// Allows you to securely operate and manage Snowcone devices remotely from
// outside of your internal network. When set to INSTALLED_AUTOSTART, remote
// management will automatically be available when the device arrives at your
// location. Otherwise, you need to use the Snowball Client to manage the device.
RemoteManagement *string `type:"string" enum:"RemoteManagement"`
// Defines the Amazon S3 buckets associated with this job.
//
// With IMPORT jobs, you specify the bucket or buckets that your transferred
// data will be imported into.
//
// With EXPORT jobs, you specify the bucket or buckets that your transferred
// data will be exported from. Optionally, you can also specify a KeyRange value.
// If you choose to export a range, you define the length of the range by providing
// either an inclusive BeginMarker value, an inclusive EndMarker value, or both.
// Ranges are UTF-8 binary sorted.
Resources *JobResource `type:"structure"`
// The RoleARN that you want to associate with this job. RoleArns are created
// using the CreateRole (https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html)
// AWS Identity and Access Management (IAM) API action.
RoleARN *string `type:"string"`
// The shipping speed for this job. This speed doesn't dictate how soon you'll
// get the Snow device; rather, it represents how quickly the Snow device moves
// to its destination while in transit. Regional shipping speeds are as follows:
//
// * In Australia, you have access to express shipping. Typically, Snow devices
// shipped express are delivered in about a day.
//
// * In the European Union (EU), you have access to express shipping. Typically,
// Snow devices shipped express are delivered in about a day. In addition,
// most countries in the EU have access to standard shipping, which typically
// takes less than a week, one way.
//
// * In India, Snow devices are delivered in one to seven days.
//
// * In the US, you have access to one-day shipping and two-day shipping.
ShippingOption *string `type:"string" enum:"ShippingOption"`
// If your job is being created in one of the US regions, you have the option
// of specifying what size Snow device you'd like for this job. In all other
// regions, Snowballs come with 80 TB in storage capacity.
//
// For more information, see "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowcone User Guide or "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowball Edge Developer Guide.
SnowballCapacityPreference *string `type:"string" enum:"Capacity"`
// The type of AWS Snow Family device to use for this job.
//
// For cluster jobs, AWS Snow Family currently supports only the EDGE device
// type.
//
// For more information, see Snowball Edge Device Options (https://docs.aws.amazon.com/snowball/latest/developer-guide/device-differences.html)
// in the Snowball Edge Developer Guide.
//
// For more information, see "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowcone User Guide or "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowball Edge Developer Guide.
SnowballType *string `type:"string" enum:"Type"`
// The tax documents required in your AWS Region.
TaxDocuments *TaxDocuments `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"}
if s.AddressId != nil && len(*s.AddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("AddressId", 40))
}
if s.ClusterId != nil && len(*s.ClusterId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("ClusterId", 39))
}
if s.Description != nil && len(*s.Description) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Description", 1))
}
if s.ForwardingAddressId != nil && len(*s.ForwardingAddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("ForwardingAddressId", 40))
}
if s.LongTermPricingId != nil && len(*s.LongTermPricingId) < 41 {
invalidParams.Add(request.NewErrParamMinLen("LongTermPricingId", 41))
}
if s.Resources != nil {
if err := s.Resources.Validate(); err != nil {
invalidParams.AddNested("Resources", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAddressId sets the AddressId field's value.
func (s *CreateJobInput) SetAddressId(v string) *CreateJobInput {
s.AddressId = &v
return s
}
// SetClusterId sets the ClusterId field's value.
func (s *CreateJobInput) SetClusterId(v string) *CreateJobInput {
s.ClusterId = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CreateJobInput) SetDescription(v string) *CreateJobInput {
s.Description = &v
return s
}
// SetDeviceConfiguration sets the DeviceConfiguration field's value.
func (s *CreateJobInput) SetDeviceConfiguration(v *DeviceConfiguration) *CreateJobInput {
s.DeviceConfiguration = v
return s
}
// SetForwardingAddressId sets the ForwardingAddressId field's value.
func (s *CreateJobInput) SetForwardingAddressId(v string) *CreateJobInput {
s.ForwardingAddressId = &v
return s
}
// SetJobType sets the JobType field's value.
func (s *CreateJobInput) SetJobType(v string) *CreateJobInput {
s.JobType = &v
return s
}
// SetKmsKeyARN sets the KmsKeyARN field's value.
func (s *CreateJobInput) SetKmsKeyARN(v string) *CreateJobInput {
s.KmsKeyARN = &v
return s
}
// SetLongTermPricingId sets the LongTermPricingId field's value.
func (s *CreateJobInput) SetLongTermPricingId(v string) *CreateJobInput {
s.LongTermPricingId = &v
return s
}
// SetNotification sets the Notification field's value.
func (s *CreateJobInput) SetNotification(v *Notification) *CreateJobInput {
s.Notification = v
return s
}
// SetOnDeviceServiceConfiguration sets the OnDeviceServiceConfiguration field's value.
func (s *CreateJobInput) SetOnDeviceServiceConfiguration(v *OnDeviceServiceConfiguration) *CreateJobInput {
s.OnDeviceServiceConfiguration = v
return s
}
// SetRemoteManagement sets the RemoteManagement field's value.
func (s *CreateJobInput) SetRemoteManagement(v string) *CreateJobInput {
s.RemoteManagement = &v
return s
}
// SetResources sets the Resources field's value.
func (s *CreateJobInput) SetResources(v *JobResource) *CreateJobInput {
s.Resources = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *CreateJobInput) SetRoleARN(v string) *CreateJobInput {
s.RoleARN = &v
return s
}
// SetShippingOption sets the ShippingOption field's value.
func (s *CreateJobInput) SetShippingOption(v string) *CreateJobInput {
s.ShippingOption = &v
return s
}
// SetSnowballCapacityPreference sets the SnowballCapacityPreference field's value.
func (s *CreateJobInput) SetSnowballCapacityPreference(v string) *CreateJobInput {
s.SnowballCapacityPreference = &v
return s
}
// SetSnowballType sets the SnowballType field's value.
func (s *CreateJobInput) SetSnowballType(v string) *CreateJobInput {
s.SnowballType = &v
return s
}
// SetTaxDocuments sets the TaxDocuments field's value.
func (s *CreateJobInput) SetTaxDocuments(v *TaxDocuments) *CreateJobInput {
s.TaxDocuments = v
return s
}
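// Example (non-generated, illustrative sketch): building a CreateJobInput with
// the fluent setters above and checking it with Validate before calling the
// API. All IDs, ARNs, and enum values below are placeholders, not real
// resources.
func exampleBuildCreateJobInput() (*CreateJobInput, error) {
	in := &CreateJobInput{}
	in.SetJobType("IMPORT").
		SetSnowballType("EDGE").
		SetShippingOption("SECOND_DAY").
		SetAddressId("ADID1234ab12-3eec-4eb3-9be6-9374c10eb51b").
		SetRoleARN("arn:aws:iam::123456789012:role/snowball-import-role").
		SetDescription("Important Photos 2016-08-11")
	// Validate enforces the client-side constraints (minimum lengths and
	// nested resource checks) documented on the struct fields.
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return in, nil
}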
type CreateJobOutput struct {
_ struct{} `type:"structure"`
// The automatically generated ID for a job, for example JID123e4567-e89b-12d3-a456-426655440000.
JobId *string `min:"39" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateJobOutput) GoString() string {
return s.String()
}
// SetJobId sets the JobId field's value.
func (s *CreateJobOutput) SetJobId(v string) *CreateJobOutput {
s.JobId = &v
return s
}
type CreateLongTermPricingInput struct {
_ struct{} `type:"structure"`
// Specifies whether the current long-term pricing type for the device should
// be renewed.
IsLongTermPricingAutoRenew *bool `type:"boolean"`
// The type of long-term pricing option you want for the device, either 1-year
// or 3-year long-term pricing.
//
// LongTermPricingType is a required field
LongTermPricingType *string `type:"string" required:"true" enum:"LongTermPricingType"`
// The type of AWS Snow Family device to use for the long-term pricing job.
SnowballType *string `type:"string" enum:"Type"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateLongTermPricingInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateLongTermPricingInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateLongTermPricingInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateLongTermPricingInput"}
if s.LongTermPricingType == nil {
invalidParams.Add(request.NewErrParamRequired("LongTermPricingType"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetIsLongTermPricingAutoRenew sets the IsLongTermPricingAutoRenew field's value.
func (s *CreateLongTermPricingInput) SetIsLongTermPricingAutoRenew(v bool) *CreateLongTermPricingInput {
s.IsLongTermPricingAutoRenew = &v
return s
}
// SetLongTermPricingType sets the LongTermPricingType field's value.
func (s *CreateLongTermPricingInput) SetLongTermPricingType(v string) *CreateLongTermPricingInput {
s.LongTermPricingType = &v
return s
}
// SetSnowballType sets the SnowballType field's value.
func (s *CreateLongTermPricingInput) SetSnowballType(v string) *CreateLongTermPricingInput {
s.SnowballType = &v
return s
}
type CreateLongTermPricingOutput struct {
_ struct{} `type:"structure"`
// The ID of the long-term pricing type for the device.
LongTermPricingId *string `min:"41" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateLongTermPricingOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateLongTermPricingOutput) GoString() string {
return s.String()
}
// SetLongTermPricingId sets the LongTermPricingId field's value.
func (s *CreateLongTermPricingOutput) SetLongTermPricingId(v string) *CreateLongTermPricingOutput {
s.LongTermPricingId = &v
return s
}
type CreateReturnShippingLabelInput struct {
_ struct{} `type:"structure"`
// The ID for a job that you want to create the return shipping label for, for
// example JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
// The shipping speed for a particular job. This speed doesn't dictate how soon
// the device is returned to AWS. This speed represents how quickly it moves
// to its destination while in transit. Regional shipping speeds are as follows:
//
// * In Australia, you have access to express shipping. Typically, Snow devices
// shipped express are delivered in about a day.
//
// * In the European Union (EU), you have access to express shipping. Typically,
// Snow devices shipped express are delivered in about a day. In addition,
// most countries in the EU have access to standard shipping, which typically
// takes less than a week, one way.
//
// * In India, Snow devices are delivered in one to seven days.
//
// * In the US, you have access to one-day shipping and two-day shipping.
ShippingOption *string `type:"string" enum:"ShippingOption"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateReturnShippingLabelInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateReturnShippingLabelInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateReturnShippingLabelInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateReturnShippingLabelInput"}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetJobId sets the JobId field's value.
func (s *CreateReturnShippingLabelInput) SetJobId(v string) *CreateReturnShippingLabelInput {
s.JobId = &v
return s
}
// SetShippingOption sets the ShippingOption field's value.
func (s *CreateReturnShippingLabelInput) SetShippingOption(v string) *CreateReturnShippingLabelInput {
s.ShippingOption = &v
return s
}
type CreateReturnShippingLabelOutput struct {
_ struct{} `type:"structure"`
// The status information of the task on a Snow device that is being returned
// to AWS.
Status *string `type:"string" enum:"ShippingLabelStatus"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateReturnShippingLabelOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateReturnShippingLabelOutput) GoString() string {
return s.String()
}
// SetStatus sets the Status field's value.
func (s *CreateReturnShippingLabelOutput) SetStatus(v string) *CreateReturnShippingLabelOutput {
s.Status = &v
return s
}
// Defines the real-time status of a Snow device's data transfer while the device
// is at AWS. This data is only available while a job has a JobState value of
// InProgress, for both import and export jobs.
type DataTransfer struct {
_ struct{} `type:"structure"`
// The number of bytes transferred between a Snow device and Amazon S3.
BytesTransferred *int64 `type:"long"`
// The number of objects transferred between a Snow device and Amazon S3.
ObjectsTransferred *int64 `type:"long"`
// The total bytes of data for a transfer between a Snow device and Amazon S3.
// This value is set to 0 (zero) until all the keys that will be transferred
// have been listed.
TotalBytes *int64 `type:"long"`
// The total number of objects for a transfer between a Snow device and Amazon
// S3. This value is set to 0 (zero) until all the keys that will be transferred
// have been listed.
TotalObjects *int64 `type:"long"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DataTransfer) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DataTransfer) GoString() string {
return s.String()
}
// SetBytesTransferred sets the BytesTransferred field's value.
func (s *DataTransfer) SetBytesTransferred(v int64) *DataTransfer {
s.BytesTransferred = &v
return s
}
// SetObjectsTransferred sets the ObjectsTransferred field's value.
func (s *DataTransfer) SetObjectsTransferred(v int64) *DataTransfer {
s.ObjectsTransferred = &v
return s
}
// SetTotalBytes sets the TotalBytes field's value.
func (s *DataTransfer) SetTotalBytes(v int64) *DataTransfer {
s.TotalBytes = &v
return s
}
// SetTotalObjects sets the TotalObjects field's value.
func (s *DataTransfer) SetTotalObjects(v int64) *DataTransfer {
s.TotalObjects = &v
return s
}
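// Example (non-generated, illustrative sketch): deriving a rough completion
// percentage from a DataTransfer value returned by DescribeJob. As noted on
// TotalBytes, the total stays 0 until all keys have been listed, so that case
// is reported separately.
func exampleTransferProgress(dt *DataTransfer) string {
	if dt == nil || dt.TotalBytes == nil || *dt.TotalBytes == 0 {
		return "listing keys; total size not yet known"
	}
	var transferred int64
	if dt.BytesTransferred != nil {
		transferred = *dt.BytesTransferred
	}
	pct := float64(transferred) / float64(*dt.TotalBytes) * 100
	return fmt.Sprintf("%.1f%% of %d bytes transferred", pct, *dt.TotalBytes)
}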
type DescribeAddressInput struct {
_ struct{} `type:"structure"`
// The automatically generated ID for a specific address.
//
// AddressId is a required field
AddressId *string `min:"40" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeAddressInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeAddressInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeAddressInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeAddressInput"}
if s.AddressId == nil {
invalidParams.Add(request.NewErrParamRequired("AddressId"))
}
if s.AddressId != nil && len(*s.AddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("AddressId", 40))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAddressId sets the AddressId field's value.
func (s *DescribeAddressInput) SetAddressId(v string) *DescribeAddressInput {
s.AddressId = &v
return s
}
type DescribeAddressOutput struct {
_ struct{} `type:"structure"`
// The address that you want the Snow device(s) associated with a specific job
// to be shipped to.
Address *Address `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeAddressOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeAddressOutput) GoString() string {
return s.String()
}
// SetAddress sets the Address field's value.
func (s *DescribeAddressOutput) SetAddress(v *Address) *DescribeAddressOutput {
s.Address = v
return s
}
type DescribeAddressesInput struct {
_ struct{} `type:"structure"`
// The number of ADDRESS objects to return.
MaxResults *int64 `type:"integer"`
// HTTP requests are stateless. To identify what object comes "next" in the
// list of ADDRESS objects, you have the option of specifying a value for NextToken
// as the starting point for your list of returned addresses.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeAddressesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeAddressesInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeAddressesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeAddressesInput"}
if s.NextToken != nil && len(*s.NextToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *DescribeAddressesInput) SetMaxResults(v int64) *DescribeAddressesInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *DescribeAddressesInput) SetNextToken(v string) *DescribeAddressesInput {
s.NextToken = &v
return s
}
type DescribeAddressesOutput struct {
_ struct{} `type:"structure"`
// The Snow device shipping addresses that were created for this account.
Addresses []*Address `type:"list"`
// HTTP requests are stateless. If you use the automatically generated NextToken
// value in your next DescribeAddresses call, your list of returned addresses
// will start from this point in the array.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeAddressesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeAddressesOutput) GoString() string {
return s.String()
}
// SetAddresses sets the Addresses field's value.
func (s *DescribeAddressesOutput) SetAddresses(v []*Address) *DescribeAddressesOutput {
s.Addresses = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *DescribeAddressesOutput) SetNextToken(v string) *DescribeAddressesOutput {
s.NextToken = &v
return s
}
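// Example (non-generated, illustrative sketch): manually paging through
// DescribeAddresses with NextToken, as described on the input and output types
// above. It assumes svc is the generated client for this package (the
// DescribeAddresses method is defined with the API operations earlier in this
// file); the generated DescribeAddressesPages helper is the usual alternative.
func exampleListAllAddresses(svc *Snowball) ([]*Address, error) {
	var all []*Address
	in := &DescribeAddressesInput{}
	for {
		out, err := svc.DescribeAddresses(in)
		if err != nil {
			return nil, err
		}
		all = append(all, out.Addresses...)
		// A nil or empty NextToken means the last page has been returned.
		if out.NextToken == nil || *out.NextToken == "" {
			break
		}
		in.SetNextToken(*out.NextToken)
	}
	return all, nil
}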
type DescribeClusterInput struct {
_ struct{} `type:"structure"`
// The automatically generated ID for a cluster.
//
// ClusterId is a required field
ClusterId *string `min:"39" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeClusterInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeClusterInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeClusterInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeClusterInput"}
if s.ClusterId == nil {
invalidParams.Add(request.NewErrParamRequired("ClusterId"))
}
if s.ClusterId != nil && len(*s.ClusterId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("ClusterId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetClusterId sets the ClusterId field's value.
func (s *DescribeClusterInput) SetClusterId(v string) *DescribeClusterInput {
s.ClusterId = &v
return s
}
type DescribeClusterOutput struct {
_ struct{} `type:"structure"`
// Information about a specific cluster, including shipping information, cluster
// status, and other important metadata.
ClusterMetadata *ClusterMetadata `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeClusterOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeClusterOutput) GoString() string {
return s.String()
}
// SetClusterMetadata sets the ClusterMetadata field's value.
func (s *DescribeClusterOutput) SetClusterMetadata(v *ClusterMetadata) *DescribeClusterOutput {
s.ClusterMetadata = v
return s
}
type DescribeJobInput struct {
_ struct{} `type:"structure"`
// The automatically generated ID for a job, for example JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeJobInput"}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetJobId sets the JobId field's value.
func (s *DescribeJobInput) SetJobId(v string) *DescribeJobInput {
s.JobId = &v
return s
}
type DescribeJobOutput struct {
_ struct{} `type:"structure"`
// Information about a specific job, including shipping information, job status,
// and other important metadata.
JobMetadata *JobMetadata `type:"structure"`
// Information about a specific job part (in the case of an export job), including
// shipping information, job status, and other important metadata.
SubJobMetadata []*JobMetadata `type:"list"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeJobOutput) GoString() string {
return s.String()
}
// SetJobMetadata sets the JobMetadata field's value.
func (s *DescribeJobOutput) SetJobMetadata(v *JobMetadata) *DescribeJobOutput {
s.JobMetadata = v
return s
}
// SetSubJobMetadata sets the SubJobMetadata field's value.
func (s *DescribeJobOutput) SetSubJobMetadata(v []*JobMetadata) *DescribeJobOutput {
s.SubJobMetadata = v
return s
}
type DescribeReturnShippingLabelInput struct {
_ struct{} `type:"structure"`
// The automatically generated ID for a job, for example JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeReturnShippingLabelInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeReturnShippingLabelInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeReturnShippingLabelInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeReturnShippingLabelInput"}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetJobId sets the JobId field's value.
func (s *DescribeReturnShippingLabelInput) SetJobId(v string) *DescribeReturnShippingLabelInput {
s.JobId = &v
return s
}
type DescribeReturnShippingLabelOutput struct {
_ struct{} `type:"structure"`
// The expiration date of the current return shipping label.
ExpirationDate *time.Time `type:"timestamp"`
// The status information of the task on a Snow device that is being returned
// to AWS.
Status *string `type:"string" enum:"ShippingLabelStatus"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeReturnShippingLabelOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DescribeReturnShippingLabelOutput) GoString() string {
return s.String()
}
// SetExpirationDate sets the ExpirationDate field's value.
func (s *DescribeReturnShippingLabelOutput) SetExpirationDate(v time.Time) *DescribeReturnShippingLabelOutput {
s.ExpirationDate = &v
return s
}
// SetStatus sets the Status field's value.
func (s *DescribeReturnShippingLabelOutput) SetStatus(v string) *DescribeReturnShippingLabelOutput {
s.Status = &v
return s
}
// The container for SnowconeDeviceConfiguration.
type DeviceConfiguration struct {
_ struct{} `type:"structure"`
// Returns information about the device configuration for an AWS Snowcone job.
SnowconeDeviceConfiguration *SnowconeDeviceConfiguration `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DeviceConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DeviceConfiguration) GoString() string {
return s.String()
}
// SetSnowconeDeviceConfiguration sets the SnowconeDeviceConfiguration field's value.
func (s *DeviceConfiguration) SetSnowconeDeviceConfiguration(v *SnowconeDeviceConfiguration) *DeviceConfiguration {
s.SnowconeDeviceConfiguration = v
return s
}
// A JSON-formatted object that contains the IDs for an Amazon Machine Image
// (AMI), including the Amazon EC2 AMI ID and the Snow device AMI ID. Each AMI
// has these two IDs to simplify identifying the AMI in both the AWS Cloud and
// on the device.
type Ec2AmiResource struct {
_ struct{} `type:"structure"`
// The ID of the AMI in Amazon EC2.
//
// AmiId is a required field
AmiId *string `min:"12" type:"string" required:"true"`
// The ID of the AMI on the Snow device.
SnowballAmiId *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Ec2AmiResource) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Ec2AmiResource) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Ec2AmiResource) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Ec2AmiResource"}
if s.AmiId == nil {
invalidParams.Add(request.NewErrParamRequired("AmiId"))
}
if s.AmiId != nil && len(*s.AmiId) < 12 {
invalidParams.Add(request.NewErrParamMinLen("AmiId", 12))
}
if s.SnowballAmiId != nil && len(*s.SnowballAmiId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("SnowballAmiId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAmiId sets the AmiId field's value.
func (s *Ec2AmiResource) SetAmiId(v string) *Ec2AmiResource {
s.AmiId = &v
return s
}
// SetSnowballAmiId sets the SnowballAmiId field's value.
func (s *Ec2AmiResource) SetSnowballAmiId(v string) *Ec2AmiResource {
s.SnowballAmiId = &v
return s
}
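// Example (non-generated, illustrative sketch): pairing an EC2 AMI ID with its
// on-device counterpart, then applying the client-side checks. Both IDs are
// placeholders in the documented formats.
func exampleAmiPair() (*Ec2AmiResource, error) {
	r := &Ec2AmiResource{}
	r.SetAmiId("ami-0b22a22eec53b9321").
		SetSnowballAmiId("s.ami-8d17a22eec53b9321")
	if err := r.Validate(); err != nil {
		return nil, err
	}
	return r, nil
}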
// Your IAM user lacks the necessary Amazon EC2 permissions to perform the attempted
// action.
type Ec2RequestFailedException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Ec2RequestFailedException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Ec2RequestFailedException) GoString() string {
return s.String()
}
func newErrorEc2RequestFailedException(v protocol.ResponseMetadata) error {
return &Ec2RequestFailedException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *Ec2RequestFailedException) Code() string {
return "Ec2RequestFailedException"
}
// Message returns the exception's message.
func (s *Ec2RequestFailedException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *Ec2RequestFailedException) OrigErr() error {
return nil
}
func (s *Ec2RequestFailedException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *Ec2RequestFailedException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *Ec2RequestFailedException) RequestID() string {
return s.RespMetadata.RequestID
}
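// Example (non-generated, illustrative sketch): distinguishing this service's
// modeled errors by type assertion. Each generated exception type satisfies
// the error interface through its Error method, so a plain type switch works
// without any extra imports.
func exampleClassifyError(err error) string {
	switch e := err.(type) {
	case *Ec2RequestFailedException:
		return "missing Amazon EC2 permissions: " + e.Message()
	case *InvalidAddressException:
		return "invalid address: " + e.Message()
	default:
		return err.Error()
	}
}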
// The container for the EventTriggerDefinition$EventResourceARN.
type EventTriggerDefinition struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) for any local Amazon S3 resource that is an
// AWS Lambda function's event trigger associated with this job.
EventResourceARN *string `type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EventTriggerDefinition) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EventTriggerDefinition) GoString() string {
return s.String()
}
// SetEventResourceARN sets the EventResourceARN field's value.
func (s *EventTriggerDefinition) SetEventResourceARN(v string) *EventTriggerDefinition {
s.EventResourceARN = &v
return s
}
type GetJobManifestInput struct {
_ struct{} `type:"structure"`
// The ID for a job that you want to get the manifest file for, for example
// JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetJobManifestInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetJobManifestInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetJobManifestInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetJobManifestInput"}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetJobId sets the JobId field's value.
func (s *GetJobManifestInput) SetJobId(v string) *GetJobManifestInput {
s.JobId = &v
return s
}
type GetJobManifestOutput struct {
_ struct{} `type:"structure"`
// The Amazon S3 presigned URL for the manifest file associated with the specified
// JobId value.
ManifestURI *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetJobManifestOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetJobManifestOutput) GoString() string {
return s.String()
}
// SetManifestURI sets the ManifestURI field's value.
func (s *GetJobManifestOutput) SetManifestURI(v string) *GetJobManifestOutput {
s.ManifestURI = &v
return s
}
type GetJobUnlockCodeInput struct {
_ struct{} `type:"structure"`
// The ID for the job that you want to get the UnlockCode value for, for example
// JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetJobUnlockCodeInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetJobUnlockCodeInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetJobUnlockCodeInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetJobUnlockCodeInput"}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetJobId sets the JobId field's value.
func (s *GetJobUnlockCodeInput) SetJobId(v string) *GetJobUnlockCodeInput {
s.JobId = &v
return s
}
type GetJobUnlockCodeOutput struct {
_ struct{} `type:"structure"`
// The UnlockCode value for the specified job. The UnlockCode value can be accessed
// for up to 360 days after the job has been created.
UnlockCode *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetJobUnlockCodeOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetJobUnlockCodeOutput) GoString() string {
return s.String()
}
// SetUnlockCode sets the UnlockCode field's value.
func (s *GetJobUnlockCodeOutput) SetUnlockCode(v string) *GetJobUnlockCodeOutput {
s.UnlockCode = &v
return s
}
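// Example (non-generated, illustrative sketch): fetching the unlock code for a
// job. It assumes svc is the generated client for this package; remember that
// the code is only retrievable for up to 360 days after job creation.
func exampleFetchUnlockCode(svc *Snowball, jobID string) (string, error) {
	in := &GetJobUnlockCodeInput{}
	in.SetJobId(jobID)
	if err := in.Validate(); err != nil {
		return "", err
	}
	out, err := svc.GetJobUnlockCode(in)
	if err != nil {
		return "", err
	}
	if out.UnlockCode == nil {
		return "", fmt.Errorf("no unlock code returned for job %s", jobID)
	}
	return *out.UnlockCode, nil
}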
type GetSnowballUsageInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSnowballUsageInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSnowballUsageInput) GoString() string {
return s.String()
}
type GetSnowballUsageOutput struct {
_ struct{} `type:"structure"`
// The service limit for the number of Snow devices this account can have at
// once. The default service limit is 1 (one).
SnowballLimit *int64 `type:"integer"`
// The number of Snow devices that this account is currently using.
SnowballsInUse *int64 `type:"integer"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSnowballUsageOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSnowballUsageOutput) GoString() string {
return s.String()
}
// SetSnowballLimit sets the SnowballLimit field's value.
func (s *GetSnowballUsageOutput) SetSnowballLimit(v int64) *GetSnowballUsageOutput {
s.SnowballLimit = &v
return s
}
// SetSnowballsInUse sets the SnowballsInUse field's value.
func (s *GetSnowballUsageOutput) SetSnowballsInUse(v int64) *GetSnowballUsageOutput {
s.SnowballsInUse = &v
return s
}
type GetSoftwareUpdatesInput struct {
_ struct{} `type:"structure"`
// The ID for a job that you want to get the software update file for, for example
// JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSoftwareUpdatesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSoftwareUpdatesInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetSoftwareUpdatesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetSoftwareUpdatesInput"}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetJobId sets the JobId field's value.
func (s *GetSoftwareUpdatesInput) SetJobId(v string) *GetSoftwareUpdatesInput {
s.JobId = &v
return s
}
type GetSoftwareUpdatesOutput struct {
_ struct{} `type:"structure"`
// The Amazon S3 presigned URL for the update file associated with the specified
// JobId value. The software update will be available for 2 days after this
// request is made. To access an update after the 2 days have passed, you'll
// have to make another call to GetSoftwareUpdates.
UpdatesURI *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSoftwareUpdatesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSoftwareUpdatesOutput) GoString() string {
return s.String()
}
// SetUpdatesURI sets the UpdatesURI field's value.
func (s *GetSoftwareUpdatesOutput) SetUpdatesURI(v string) *GetSoftwareUpdatesOutput {
s.UpdatesURI = &v
return s
}
// The tax documents required in AWS Regions in India.
type INDTaxDocuments struct {
_ struct{} `type:"structure"`
// The Goods and Services Tax (GST) documents required in AWS Regions in India.
GSTIN *string `type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s INDTaxDocuments) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s INDTaxDocuments) GoString() string {
return s.String()
}
// SetGSTIN sets the GSTIN field's value.
func (s *INDTaxDocuments) SetGSTIN(v string) *INDTaxDocuments {
s.GSTIN = &v
return s
}
// The address provided was invalid. Check the address with your region's carrier,
// and try again.
type InvalidAddressException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidAddressException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidAddressException) GoString() string {
return s.String()
}
func newErrorInvalidAddressException(v protocol.ResponseMetadata) error {
return &InvalidAddressException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InvalidAddressException) Code() string {
return "InvalidAddressException"
}
// Message returns the exception's message.
func (s *InvalidAddressException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidAddressException) OrigErr() error {
return nil
}
func (s *InvalidAddressException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidAddressException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *InvalidAddressException) RequestID() string {
return s.RespMetadata.RequestID
}
// Job or cluster creation failed. One or more inputs were invalid. Confirm
// that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType,
// and try again.
type InvalidInputCombinationException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidInputCombinationException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidInputCombinationException) GoString() string {
return s.String()
}
func newErrorInvalidInputCombinationException(v protocol.ResponseMetadata) error {
return &InvalidInputCombinationException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InvalidInputCombinationException) Code() string {
return "InvalidInputCombinationException"
}
// Message returns the exception's message.
func (s *InvalidInputCombinationException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidInputCombinationException) OrigErr() error {
return nil
}
func (s *InvalidInputCombinationException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidInputCombinationException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *InvalidInputCombinationException) RequestID() string {
return s.RespMetadata.RequestID
}
// The action can't be performed because the job's current state doesn't allow
// that action to be performed.
type InvalidJobStateException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidJobStateException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidJobStateException) GoString() string {
return s.String()
}
func newErrorInvalidJobStateException(v protocol.ResponseMetadata) error {
return &InvalidJobStateException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InvalidJobStateException) Code() string {
return "InvalidJobStateException"
}
// Message returns the exception's message.
func (s *InvalidJobStateException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidJobStateException) OrigErr() error {
return nil
}
func (s *InvalidJobStateException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidJobStateException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *InvalidJobStateException) RequestID() string {
return s.RespMetadata.RequestID
}
// The NextToken string was altered unexpectedly, and the operation has stopped.
// Run the operation without changing the NextToken string, and try again.
type InvalidNextTokenException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidNextTokenException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidNextTokenException) GoString() string {
return s.String()
}
func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error {
return &InvalidNextTokenException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InvalidNextTokenException) Code() string {
return "InvalidNextTokenException"
}
// Message returns the exception's message.
func (s *InvalidNextTokenException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidNextTokenException) OrigErr() error {
return nil
}
func (s *InvalidNextTokenException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidNextTokenException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *InvalidNextTokenException) RequestID() string {
return s.RespMetadata.RequestID
}
// The specified resource can't be found. Check the information you provided
// in your last request, and try again.
type InvalidResourceException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
// The provided resource value is invalid.
ResourceType *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidResourceException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidResourceException) GoString() string {
return s.String()
}
func newErrorInvalidResourceException(v protocol.ResponseMetadata) error {
return &InvalidResourceException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InvalidResourceException) Code() string {
return "InvalidResourceException"
}
// Message returns the exception's message.
func (s *InvalidResourceException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidResourceException) OrigErr() error {
return nil
}
func (s *InvalidResourceException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidResourceException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *InvalidResourceException) RequestID() string {
return s.RespMetadata.RequestID
}
// Each JobListEntry object contains a job's state, a job's ID, and a value
// that indicates whether the job is a job part, in the case of an export job.
type JobListEntry struct {
_ struct{} `type:"structure"`
// The creation date for this job.
CreationDate *time.Time `type:"timestamp"`
// The optional description of this specific job, for example Important Photos
// 2016-08-11.
Description *string `min:"1" type:"string"`
// A value that indicates that this job is a main job. A main job represents
// a successful request to create an export job. Main jobs aren't associated
// with any Snowballs. Instead, each main job will have at least one job part,
// and each job part is associated with a Snowball. It might take some time
// before the job parts associated with a particular main job are listed, because
// they are created after the main job is created.
IsMaster *bool `type:"boolean"`
// The automatically generated ID for a job, for example JID123e4567-e89b-12d3-a456-426655440000.
JobId *string `min:"1" type:"string"`
// The current state of this job.
JobState *string `type:"string" enum:"JobState"`
// The type of job.
JobType *string `type:"string" enum:"JobType"`
// The type of device used with this job.
SnowballType *string `type:"string" enum:"Type"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s JobListEntry) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s JobListEntry) GoString() string {
return s.String()
}
// SetCreationDate sets the CreationDate field's value.
func (s *JobListEntry) SetCreationDate(v time.Time) *JobListEntry {
s.CreationDate = &v
return s
}
// SetDescription sets the Description field's value.
func (s *JobListEntry) SetDescription(v string) *JobListEntry {
s.Description = &v
return s
}
// SetIsMaster sets the IsMaster field's value.
func (s *JobListEntry) SetIsMaster(v bool) *JobListEntry {
s.IsMaster = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *JobListEntry) SetJobId(v string) *JobListEntry {
s.JobId = &v
return s
}
// SetJobState sets the JobState field's value.
func (s *JobListEntry) SetJobState(v string) *JobListEntry {
s.JobState = &v
return s
}
// SetJobType sets the JobType field's value.
func (s *JobListEntry) SetJobType(v string) *JobListEntry {
s.JobType = &v
return s
}
// SetSnowballType sets the SnowballType field's value.
func (s *JobListEntry) SetSnowballType(v string) *JobListEntry {
s.SnowballType = &v
return s
}
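// Example (non-generated, illustrative sketch): selecting only the main jobs
// from a slice of JobListEntry values, using the IsMaster flag described
// above. Job parts (IsMaster false) are skipped.
func exampleMainJobs(entries []*JobListEntry) []*JobListEntry {
	var mainJobs []*JobListEntry
	for _, e := range entries {
		if e != nil && e.IsMaster != nil && *e.IsMaster {
			mainJobs = append(mainJobs, e)
		}
	}
	return mainJobs
}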
// Contains job logs. Whenever a Snow device is used to import data into or
// export data out of Amazon S3, you'll have the option of downloading a PDF
// job report. Job logs are returned as a part of the response syntax of the
// DescribeJob action in the JobMetadata data type. The job logs can be accessed
// for up to 60 minutes after this request has been made. To access any of the
// job logs after 60 minutes have passed, you'll have to make another call to
// the DescribeJob action.
//
// For import jobs, the PDF job report becomes available at the end of the import
// process. For export jobs, your job report typically becomes available while
// the Snow device for your job part is being delivered to you.
//
// The job report provides you insight into the state of your Amazon S3 data
// transfer. The report includes details about your job or job part for your
// records.
//
// For deeper visibility into the status of your transferred objects, you can
// look at the two associated logs: a success log and a failure log. The logs
// are saved in comma-separated value (CSV) format, and the name of each log
// includes the ID of the job or job part that the log describes.
type JobLogs struct {
_ struct{} `type:"structure"`
// A link to an Amazon S3 presigned URL where the job completion report is located.
JobCompletionReportURI *string `min:"1" type:"string"`
// A link to an Amazon S3 presigned URL where the job failure log is located.
JobFailureLogURI *string `min:"1" type:"string"`
// A link to an Amazon S3 presigned URL where the job success log is located.
JobSuccessLogURI *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s JobLogs) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s JobLogs) GoString() string {
return s.String()
}
// SetJobCompletionReportURI sets the JobCompletionReportURI field's value.
func (s *JobLogs) SetJobCompletionReportURI(v string) *JobLogs {
s.JobCompletionReportURI = &v
return s
}
// SetJobFailureLogURI sets the JobFailureLogURI field's value.
func (s *JobLogs) SetJobFailureLogURI(v string) *JobLogs {
s.JobFailureLogURI = &v
return s
}
// SetJobSuccessLogURI sets the JobSuccessLogURI field's value.
func (s *JobLogs) SetJobSuccessLogURI(v string) *JobLogs {
s.JobSuccessLogURI = &v
return s
}
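// Example (illustrative sketch, not generated code): retrieving JobLogs through
// DescribeJob and downloading the PDF completion report from its presigned URL.
// The svc variable is an assumed *Snowball client and jobID an assumed job ID;
// net/http works here because the URI is presigned and needs no AWS signing.
//
//    out, err := svc.DescribeJob(&DescribeJobInput{JobId: aws.String(jobID)})
//    if err != nil {
//        return err
//    }
//    if logs := out.JobMetadata.JobLogInfo; logs != nil && logs.JobCompletionReportURI != nil {
//        // The presigned URL is only valid for a limited window (60 minutes per
//        // the documentation above); call DescribeJob again for a fresh one.
//        resp, err := http.Get(*logs.JobCompletionReportURI)
//        if err != nil {
//            return err
//        }
//        defer resp.Body.Close()
//        // Copy resp.Body to a local file to save the PDF report.
//    }
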
// Contains information about a specific job including shipping information,
// job status, and other important metadata. This information is returned as
// a part of the response syntax of the DescribeJob action.
type JobMetadata struct {
_ struct{} `type:"structure"`
// The ID for the address that you want the Snow device shipped to.
AddressId *string `min:"40" type:"string"`
// The 39-character ID for the cluster, for example CID123e4567-e89b-12d3-a456-426655440000.
ClusterId *string `min:"1" type:"string"`
// The creation date for this job.
CreationDate *time.Time `type:"timestamp"`
// A value that defines the real-time status of a Snow device's data transfer
// while the device is at AWS. This data is only available while a job has a
// JobState value of InProgress, for both import and export jobs.
DataTransferProgress *DataTransfer `type:"structure"`
// The description of the job, provided at job creation.
Description *string `min:"1" type:"string"`
// The container for SnowconeDeviceConfiguration.
DeviceConfiguration *DeviceConfiguration `type:"structure"`
// The ID of the address that you want a job shipped to, after it has been
// shipped to its primary address. This field is not supported in most regions.
ForwardingAddressId *string `min:"40" type:"string"`
// The automatically generated ID for a job, for example JID123e4567-e89b-12d3-a456-426655440000.
JobId *string `min:"1" type:"string"`
// Links to Amazon S3 presigned URLs for the job report and logs. For import
// jobs, the PDF job report becomes available at the end of the import process.
// For export jobs, your job report typically becomes available while the Snow
// device for your job part is being delivered to you.
JobLogInfo *JobLogs `type:"structure"`
// The current state of this job.
JobState *string `type:"string" enum:"JobState"`
// The type of job.
JobType *string `type:"string" enum:"JobType"`
// The Amazon Resource Name (ARN) for the AWS Key Management Service (AWS KMS)
// key associated with this job. This ARN was created using the CreateKey (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html)
// API action in AWS KMS.
KmsKeyARN *string `type:"string"`
// The ID of the long-term pricing type for the device.
LongTermPricingId *string `min:"41" type:"string"`
// The Amazon Simple Notification Service (Amazon SNS) notification settings
// associated with a specific job. The Notification object is returned as a
// part of the response syntax of the DescribeJob action in the JobMetadata
// data type.
Notification *Notification `type:"structure"`
// Represents metadata and configuration settings for services on an AWS Snow
// Family device.
OnDeviceServiceConfiguration *OnDeviceServiceConfiguration `type:"structure"`
// Allows you to securely operate and manage Snowcone devices remotely from
// outside of your internal network. When set to INSTALLED_AUTOSTART, remote
// management will automatically be available when the device arrives at your
// location. Otherwise, you need to use the Snowball Client to manage the device.
RemoteManagement *string `type:"string" enum:"RemoteManagement"`
// An array of S3Resource objects. Each S3Resource object represents an Amazon
// S3 bucket that your transferred data will be exported from or imported into.
Resources *JobResource `type:"structure"`
// The role ARN associated with this job. This ARN was created using the CreateRole
// (https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html)
// API action in AWS Identity and Access Management (IAM).
RoleARN *string `type:"string"`
// A job's shipping information, including inbound and outbound tracking numbers
// and shipping speed options.
ShippingDetails *ShippingDetails `type:"structure"`
// The Snow device capacity preference for this job, specified at job creation.
// In US regions, you can choose between 50 TB and 80 TB Snowballs. All other
// regions use 80 TB capacity Snowballs.
//
// For more information, see "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowcone User Guide or "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html"
// (Snow Family Devices and Capacity) in the Snowball Edge Developer Guide.
SnowballCapacityPreference *string `type:"string" enum:"Capacity"`
// The type of device used with this job.
SnowballType *string `type:"string" enum:"Type"`
// The metadata associated with the tax documents required in your AWS Region.
TaxDocuments *TaxDocuments `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s JobMetadata) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s JobMetadata) GoString() string {
return s.String()
}
// SetAddressId sets the AddressId field's value.
func (s *JobMetadata) SetAddressId(v string) *JobMetadata {
s.AddressId = &v
return s
}
// SetClusterId sets the ClusterId field's value.
func (s *JobMetadata) SetClusterId(v string) *JobMetadata {
s.ClusterId = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *JobMetadata) SetCreationDate(v time.Time) *JobMetadata {
s.CreationDate = &v
return s
}
// SetDataTransferProgress sets the DataTransferProgress field's value.
func (s *JobMetadata) SetDataTransferProgress(v *DataTransfer) *JobMetadata {
s.DataTransferProgress = v
return s
}
// SetDescription sets the Description field's value.
func (s *JobMetadata) SetDescription(v string) *JobMetadata {
s.Description = &v
return s
}
// SetDeviceConfiguration sets the DeviceConfiguration field's value.
func (s *JobMetadata) SetDeviceConfiguration(v *DeviceConfiguration) *JobMetadata {
s.DeviceConfiguration = v
return s
}
// SetForwardingAddressId sets the ForwardingAddressId field's value.
func (s *JobMetadata) SetForwardingAddressId(v string) *JobMetadata {
s.ForwardingAddressId = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *JobMetadata) SetJobId(v string) *JobMetadata {
s.JobId = &v
return s
}
// SetJobLogInfo sets the JobLogInfo field's value.
func (s *JobMetadata) SetJobLogInfo(v *JobLogs) *JobMetadata {
s.JobLogInfo = v
return s
}
// SetJobState sets the JobState field's value.
func (s *JobMetadata) SetJobState(v string) *JobMetadata {
s.JobState = &v
return s
}
// SetJobType sets the JobType field's value.
func (s *JobMetadata) SetJobType(v string) *JobMetadata {
s.JobType = &v
return s
}
// SetKmsKeyARN sets the KmsKeyARN field's value.
func (s *JobMetadata) SetKmsKeyARN(v string) *JobMetadata {
s.KmsKeyARN = &v
return s
}
// SetLongTermPricingId sets the LongTermPricingId field's value.
func (s *JobMetadata) SetLongTermPricingId(v string) *JobMetadata {
s.LongTermPricingId = &v
return s
}
// SetNotification sets the Notification field's value.
func (s *JobMetadata) SetNotification(v *Notification) *JobMetadata {
s.Notification = v
return s
}
// SetOnDeviceServiceConfiguration sets the OnDeviceServiceConfiguration field's value.
func (s *JobMetadata) SetOnDeviceServiceConfiguration(v *OnDeviceServiceConfiguration) *JobMetadata {
s.OnDeviceServiceConfiguration = v
return s
}
// SetRemoteManagement sets the RemoteManagement field's value.
func (s *JobMetadata) SetRemoteManagement(v string) *JobMetadata {
s.RemoteManagement = &v
return s
}
// SetResources sets the Resources field's value.
func (s *JobMetadata) SetResources(v *JobResource) *JobMetadata {
s.Resources = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *JobMetadata) SetRoleARN(v string) *JobMetadata {
s.RoleARN = &v
return s
}
// SetShippingDetails sets the ShippingDetails field's value.
func (s *JobMetadata) SetShippingDetails(v *ShippingDetails) *JobMetadata {
s.ShippingDetails = v
return s
}
// SetSnowballCapacityPreference sets the SnowballCapacityPreference field's value.
func (s *JobMetadata) SetSnowballCapacityPreference(v string) *JobMetadata {
s.SnowballCapacityPreference = &v
return s
}
// SetSnowballType sets the SnowballType field's value.
func (s *JobMetadata) SetSnowballType(v string) *JobMetadata {
s.SnowballType = &v
return s
}
// SetTaxDocuments sets the TaxDocuments field's value.
func (s *JobMetadata) SetTaxDocuments(v *TaxDocuments) *JobMetadata {
s.TaxDocuments = v
return s
}
// Contains an array of AWS resource objects. Each object represents an Amazon
// S3 bucket, an AWS Lambda function, or an Amazon Machine Image (AMI) based
// on Amazon EC2 that is associated with a particular job.
type JobResource struct {
_ struct{} `type:"structure"`
// The Amazon Machine Images (AMIs) associated with this job.
Ec2AmiResources []*Ec2AmiResource `type:"list"`
// The Python-language Lambda functions for this job.
LambdaResources []*LambdaResource `type:"list"`
// An array of S3Resource objects.
S3Resources []*S3Resource `type:"list"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s JobResource) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s JobResource) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *JobResource) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "JobResource"}
if s.Ec2AmiResources != nil {
for i, v := range s.Ec2AmiResources {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Ec2AmiResources", i), err.(request.ErrInvalidParams))
}
}
}
if s.S3Resources != nil {
for i, v := range s.S3Resources {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "S3Resources", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetEc2AmiResources sets the Ec2AmiResources field's value.
func (s *JobResource) SetEc2AmiResources(v []*Ec2AmiResource) *JobResource {
s.Ec2AmiResources = v
return s
}
// SetLambdaResources sets the LambdaResources field's value.
func (s *JobResource) SetLambdaResources(v []*LambdaResource) *JobResource {
s.LambdaResources = v
return s
}
// SetS3Resources sets the S3Resources field's value.
func (s *JobResource) SetS3Resources(v []*S3Resource) *JobResource {
s.S3Resources = v
return s
}
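// Example (illustrative sketch): assembling a JobResource for an import job and
// validating it client-side before calling CreateJob. The bucket ARN below is a
// hypothetical value.
//
//    resources := (&JobResource{}).SetS3Resources([]*S3Resource{
//        (&S3Resource{}).SetBucketArn("arn:aws:s3:::example-import-bucket"),
//    })
//    if err := resources.Validate(); err != nil {
//        return err // surfaces nested S3Resource or Ec2AmiResource parameter errors
//    }
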
// The provided AWS Key Management Service key lacks the permissions to perform
// the specified CreateJob or UpdateJob action.
type KMSRequestFailedException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s KMSRequestFailedException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s KMSRequestFailedException) GoString() string {
return s.String()
}
func newErrorKMSRequestFailedException(v protocol.ResponseMetadata) error {
return &KMSRequestFailedException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *KMSRequestFailedException) Code() string {
return "KMSRequestFailedException"
}
// Message returns the exception's message.
func (s *KMSRequestFailedException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil; it satisfies the awserr.Error interface.
func (s *KMSRequestFailedException) OrigErr() error {
return nil
}
func (s *KMSRequestFailedException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *KMSRequestFailedException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *KMSRequestFailedException) RequestID() string {
return s.RespMetadata.RequestID
}
// Contains a key range. For export jobs, a S3Resource object can have an optional
// KeyRange value. The length of the range is defined at job creation, and has
// either an inclusive BeginMarker, an inclusive EndMarker, or both. Ranges
// are UTF-8 binary sorted.
type KeyRange struct {
_ struct{} `type:"structure"`
// The key that starts an optional key range for an export job. Ranges are inclusive
// and UTF-8 binary sorted.
BeginMarker *string `min:"1" type:"string"`
// The key that ends an optional key range for an export job. Ranges are inclusive
// and UTF-8 binary sorted.
EndMarker *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s KeyRange) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s KeyRange) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *KeyRange) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "KeyRange"}
if s.BeginMarker != nil && len(*s.BeginMarker) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BeginMarker", 1))
}
if s.EndMarker != nil && len(*s.EndMarker) < 1 {
invalidParams.Add(request.NewErrParamMinLen("EndMarker", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBeginMarker sets the BeginMarker field's value.
func (s *KeyRange) SetBeginMarker(v string) *KeyRange {
s.BeginMarker = &v
return s
}
// SetEndMarker sets the EndMarker field's value.
func (s *KeyRange) SetEndMarker(v string) *KeyRange {
s.EndMarker = &v
return s
}
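// Example (illustrative sketch): constraining an export job to a UTF-8 binary
// sorted key range. Both markers are inclusive and optional; the prefixes below
// are hypothetical.
//
//    keyRange := (&KeyRange{}).
//        SetBeginMarker("logs/2016-01-01").
//        SetEndMarker("logs/2016-12-31")
//    if err := keyRange.Validate(); err != nil {
//        return err // each marker must be at least 1 character long
//    }
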
// Identifies an AWS Lambda function and the event triggers that invoke it for
// this job.
type LambdaResource struct {
_ struct{} `type:"structure"`
// The array of ARNs for S3Resource objects to trigger the LambdaResource objects
// associated with this job.
EventTriggers []*EventTriggerDefinition `type:"list"`
// An Amazon Resource Name (ARN) that represents an AWS Lambda function to be
// triggered by PUT object actions on the associated local Amazon S3 resource.
LambdaArn *string `type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s LambdaResource) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s LambdaResource) GoString() string {
return s.String()
}
// SetEventTriggers sets the EventTriggers field's value.
func (s *LambdaResource) SetEventTriggers(v []*EventTriggerDefinition) *LambdaResource {
s.EventTriggers = v
return s
}
// SetLambdaArn sets the LambdaArn field's value.
func (s *LambdaResource) SetLambdaArn(v string) *LambdaResource {
s.LambdaArn = &v
return s
}
type ListClusterJobsInput struct {
_ struct{} `type:"structure"`
// The 39-character ID for the cluster that you want to list, for example CID123e4567-e89b-12d3-a456-426655440000.
//
// ClusterId is a required field
ClusterId *string `min:"39" type:"string" required:"true"`
// The number of JobListEntry objects to return.
MaxResults *int64 `type:"integer"`
// HTTP requests are stateless. To identify what object comes "next" in the
// list of JobListEntry objects, you have the option of specifying NextToken
// as the starting point for your returned list.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListClusterJobsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListClusterJobsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListClusterJobsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListClusterJobsInput"}
if s.ClusterId == nil {
invalidParams.Add(request.NewErrParamRequired("ClusterId"))
}
if s.ClusterId != nil && len(*s.ClusterId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("ClusterId", 39))
}
if s.NextToken != nil && len(*s.NextToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetClusterId sets the ClusterId field's value.
func (s *ListClusterJobsInput) SetClusterId(v string) *ListClusterJobsInput {
s.ClusterId = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListClusterJobsInput) SetMaxResults(v int64) *ListClusterJobsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListClusterJobsInput) SetNextToken(v string) *ListClusterJobsInput {
s.NextToken = &v
return s
}
type ListClusterJobsOutput struct {
_ struct{} `type:"structure"`
// Each JobListEntry object contains a job's state, a job's ID, and a value
// that indicates whether the job is a job part, in the case of export jobs.
JobListEntries []*JobListEntry `type:"list"`
// HTTP requests are stateless. If you use the automatically generated NextToken
// value in your next ListClusterJobs call, your list of returned jobs
// will start from this point in the array.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListClusterJobsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListClusterJobsOutput) GoString() string {
return s.String()
}
// SetJobListEntries sets the JobListEntries field's value.
func (s *ListClusterJobsOutput) SetJobListEntries(v []*JobListEntry) *ListClusterJobsOutput {
s.JobListEntries = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListClusterJobsOutput) SetNextToken(v string) *ListClusterJobsOutput {
s.NextToken = &v
return s
}
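// Example (illustrative sketch): paging through all jobs in a cluster by feeding
// each response's NextToken back into the next request. svc is an assumed
// *Snowball client and clusterID an assumed 39-character cluster ID.
//
//    input := (&ListClusterJobsInput{}).SetClusterId(clusterID)
//    for {
//        out, err := svc.ListClusterJobs(input)
//        if err != nil {
//            return err
//        }
//        for _, entry := range out.JobListEntries {
//            fmt.Println(aws.StringValue(entry.JobId), aws.StringValue(entry.JobState))
//        }
//        if out.NextToken == nil {
//            break // no more pages
//        }
//        input.SetNextToken(*out.NextToken)
//    }
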
type ListClustersInput struct {
_ struct{} `type:"structure"`
// The number of ClusterListEntry objects to return.
MaxResults *int64 `type:"integer"`
// HTTP requests are stateless. To identify what object comes "next" in the
// list of ClusterListEntry objects, you have the option of specifying NextToken
// as the starting point for your returned list.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListClustersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListClustersInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListClustersInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListClustersInput"}
if s.NextToken != nil && len(*s.NextToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListClustersInput) SetMaxResults(v int64) *ListClustersInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListClustersInput) SetNextToken(v string) *ListClustersInput {
s.NextToken = &v
return s
}
type ListClustersOutput struct {
_ struct{} `type:"structure"`
// Each ClusterListEntry object contains a cluster's state, a cluster's ID,
// and other important status information.
ClusterListEntries []*ClusterListEntry `type:"list"`
// HTTP requests are stateless. If you use the automatically generated NextToken
// value in your next ListClusters call, your list of returned clusters
// will start from this point in the array.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListClustersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListClustersOutput) GoString() string {
return s.String()
}
// SetClusterListEntries sets the ClusterListEntries field's value.
func (s *ListClustersOutput) SetClusterListEntries(v []*ClusterListEntry) *ListClustersOutput {
s.ClusterListEntries = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListClustersOutput) SetNextToken(v string) *ListClustersOutput {
s.NextToken = &v
return s
}
type ListCompatibleImagesInput struct {
_ struct{} `type:"structure"`
// The maximum number of results for the list of compatible images. Currently,
// a Snowball Edge device can store 10 AMIs.
MaxResults *int64 `type:"integer"`
// HTTP requests are stateless. To identify what object comes "next" in the
// list of compatible images, you can specify a value for NextToken as the starting
// point for your list of returned images.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListCompatibleImagesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListCompatibleImagesInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListCompatibleImagesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListCompatibleImagesInput"}
if s.NextToken != nil && len(*s.NextToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListCompatibleImagesInput) SetMaxResults(v int64) *ListCompatibleImagesInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListCompatibleImagesInput) SetNextToken(v string) *ListCompatibleImagesInput {
s.NextToken = &v
return s
}
type ListCompatibleImagesOutput struct {
_ struct{} `type:"structure"`
// A JSON-formatted object that describes a compatible AMI, including the ID
// and name for a Snow device AMI.
CompatibleImages []*CompatibleImage `type:"list"`
// Because HTTP requests are stateless, this is the starting point for your
// next list of returned images.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListCompatibleImagesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListCompatibleImagesOutput) GoString() string {
return s.String()
}
// SetCompatibleImages sets the CompatibleImages field's value.
func (s *ListCompatibleImagesOutput) SetCompatibleImages(v []*CompatibleImage) *ListCompatibleImagesOutput {
s.CompatibleImages = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListCompatibleImagesOutput) SetNextToken(v string) *ListCompatibleImagesOutput {
s.NextToken = &v
return s
}
type ListJobsInput struct {
_ struct{} `type:"structure"`
// The number of JobListEntry objects to return.
MaxResults *int64 `type:"integer"`
// HTTP requests are stateless. To identify what object comes "next" in the
// list of JobListEntry objects, you have the option of specifying NextToken
// as the starting point for your returned list.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListJobsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListJobsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListJobsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"}
if s.NextToken != nil && len(*s.NextToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput {
s.NextToken = &v
return s
}
type ListJobsOutput struct {
_ struct{} `type:"structure"`
// Each JobListEntry object contains a job's state, a job's ID, and a value
// that indicates whether the job is a job part, in the case of export jobs.
JobListEntries []*JobListEntry `type:"list"`
// HTTP requests are stateless. If you use this automatically generated NextToken
// value in your next ListJobs call, your returned JobListEntry objects will
// start from this point in the array.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListJobsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListJobsOutput) GoString() string {
return s.String()
}
// SetJobListEntries sets the JobListEntries field's value.
func (s *ListJobsOutput) SetJobListEntries(v []*JobListEntry) *ListJobsOutput {
s.JobListEntries = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput {
s.NextToken = &v
return s
}
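// Example (illustrative sketch): walking every JobListEntry with the generated
// ListJobsPages paginator rather than managing NextToken by hand. svc is an
// assumed *Snowball client.
//
//    err := svc.ListJobsPages(&ListJobsInput{},
//        func(page *ListJobsOutput, lastPage bool) bool {
//            for _, entry := range page.JobListEntries {
//                fmt.Println(aws.StringValue(entry.JobId))
//            }
//            return true // keep fetching until the last page
//        })
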
type ListLongTermPricingInput struct {
_ struct{} `type:"structure"`
// The maximum number of ListLongTermPricing objects to return.
MaxResults *int64 `type:"integer"`
// Because HTTP requests are stateless, this is the starting point for your
// next list of ListLongTermPricing results to return.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListLongTermPricingInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListLongTermPricingInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListLongTermPricingInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListLongTermPricingInput"}
if s.NextToken != nil && len(*s.NextToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListLongTermPricingInput) SetMaxResults(v int64) *ListLongTermPricingInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListLongTermPricingInput) SetNextToken(v string) *ListLongTermPricingInput {
s.NextToken = &v
return s
}
type ListLongTermPricingOutput struct {
_ struct{} `type:"structure"`
// Each LongTermPricingEntry object contains a status, ID, and other information
// about the LongTermPricing type.
LongTermPricingEntries []*LongTermPricingListEntry `type:"list"`
// Because HTTP requests are stateless, this is the starting point for your
// next list of returned ListLongTermPricing results.
NextToken *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListLongTermPricingOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListLongTermPricingOutput) GoString() string {
return s.String()
}
// SetLongTermPricingEntries sets the LongTermPricingEntries field's value.
func (s *ListLongTermPricingOutput) SetLongTermPricingEntries(v []*LongTermPricingListEntry) *ListLongTermPricingOutput {
s.LongTermPricingEntries = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListLongTermPricingOutput) SetNextToken(v string) *ListLongTermPricingOutput {
s.NextToken = &v
return s
}
// Each LongTermPricingListEntry object contains information about a long-term
// pricing type.
type LongTermPricingListEntry struct {
_ struct{} `type:"structure"`
// The current active jobs on the device associated with the long-term pricing type.
CurrentActiveJob *string `min:"39" type:"string"`
// If set to true, specifies that the current long-term pricing type for the
// device should be automatically renewed before the long-term pricing contract
// expires.
IsLongTermPricingAutoRenew *bool `type:"boolean"`
// The IDs of the jobs that are associated with a long-term pricing type.
JobIds []*string `type:"list"`
// The end date of the long-term pricing contract.
LongTermPricingEndDate *time.Time `type:"timestamp"`
// The ID of the long-term pricing type for the device.
LongTermPricingId *string `min:"41" type:"string"`
// The start date of the long-term pricing contract.
LongTermPricingStartDate *time.Time `type:"timestamp"`
// The status of the long-term pricing type.
LongTermPricingStatus *string `min:"1" type:"string"`
// The type of long-term pricing that was selected for the device.
LongTermPricingType *string `type:"string" enum:"LongTermPricingType"`
// The ID of the job for a new device that replaces a device ordered with long-term
// pricing.
ReplacementJob *string `min:"39" type:"string"`
// The type of AWS Snow Family device associated with this long-term pricing
// job.
SnowballType *string `type:"string" enum:"Type"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s LongTermPricingListEntry) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s LongTermPricingListEntry) GoString() string {
return s.String()
}
// SetCurrentActiveJob sets the CurrentActiveJob field's value.
func (s *LongTermPricingListEntry) SetCurrentActiveJob(v string) *LongTermPricingListEntry {
s.CurrentActiveJob = &v
return s
}
// SetIsLongTermPricingAutoRenew sets the IsLongTermPricingAutoRenew field's value.
func (s *LongTermPricingListEntry) SetIsLongTermPricingAutoRenew(v bool) *LongTermPricingListEntry {
s.IsLongTermPricingAutoRenew = &v
return s
}
// SetJobIds sets the JobIds field's value.
func (s *LongTermPricingListEntry) SetJobIds(v []*string) *LongTermPricingListEntry {
s.JobIds = v
return s
}
// SetLongTermPricingEndDate sets the LongTermPricingEndDate field's value.
func (s *LongTermPricingListEntry) SetLongTermPricingEndDate(v time.Time) *LongTermPricingListEntry {
s.LongTermPricingEndDate = &v
return s
}
// SetLongTermPricingId sets the LongTermPricingId field's value.
func (s *LongTermPricingListEntry) SetLongTermPricingId(v string) *LongTermPricingListEntry {
s.LongTermPricingId = &v
return s
}
// SetLongTermPricingStartDate sets the LongTermPricingStartDate field's value.
func (s *LongTermPricingListEntry) SetLongTermPricingStartDate(v time.Time) *LongTermPricingListEntry {
s.LongTermPricingStartDate = &v
return s
}
// SetLongTermPricingStatus sets the LongTermPricingStatus field's value.
func (s *LongTermPricingListEntry) SetLongTermPricingStatus(v string) *LongTermPricingListEntry {
s.LongTermPricingStatus = &v
return s
}
// SetLongTermPricingType sets the LongTermPricingType field's value.
func (s *LongTermPricingListEntry) SetLongTermPricingType(v string) *LongTermPricingListEntry {
s.LongTermPricingType = &v
return s
}
// SetReplacementJob sets the ReplacementJob field's value.
func (s *LongTermPricingListEntry) SetReplacementJob(v string) *LongTermPricingListEntry {
s.ReplacementJob = &v
return s
}
// SetSnowballType sets the SnowballType field's value.
func (s *LongTermPricingListEntry) SetSnowballType(v string) *LongTermPricingListEntry {
s.SnowballType = &v
return s
}
// An object that represents metadata and configuration settings for NFS service
// on an AWS Snow Family device.
type NFSOnDeviceServiceConfiguration struct {
_ struct{} `type:"structure"`
// The maximum NFS storage for one Snow Family device.
StorageLimit *int64 `type:"integer"`
// The scale unit of the NFS storage on the device.
//
// Valid values: TB.
StorageUnit *string `type:"string" enum:"StorageUnit"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s NFSOnDeviceServiceConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s NFSOnDeviceServiceConfiguration) GoString() string {
return s.String()
}
// SetStorageLimit sets the StorageLimit field's value.
func (s *NFSOnDeviceServiceConfiguration) SetStorageLimit(v int64) *NFSOnDeviceServiceConfiguration {
s.StorageLimit = &v
return s
}
// SetStorageUnit sets the StorageUnit field's value.
func (s *NFSOnDeviceServiceConfiguration) SetStorageUnit(v string) *NFSOnDeviceServiceConfiguration {
s.StorageUnit = &v
return s
}
// The Amazon Simple Notification Service (Amazon SNS) notification settings
// associated with a specific job. The Notification object is returned as a
// part of the response syntax of the DescribeJob action in the JobMetadata
// data type.
//
// When the notification settings are defined during job creation, you can choose
// to notify based on a specific set of job states using the JobStatesToNotify
// array of strings, or you can specify that you want to have Amazon SNS notifications
// sent out for all job states with NotifyAll set to true.
type Notification struct {
_ struct{} `type:"structure"`
// The list of job states that will trigger a notification for this job.
JobStatesToNotify []*string `type:"list"`
// Any change in job state will trigger a notification for this job.
NotifyAll *bool `type:"boolean"`
// The new SNS TopicArn that you want to associate with this job. You can create
// Amazon Resource Names (ARNs) for topics by using the CreateTopic (https://docs.aws.amazon.com/sns/latest/api/API_CreateTopic.html)
// Amazon SNS API action.
//
// You can subscribe email addresses to an Amazon SNS topic through the AWS
// Management Console, or by using the Subscribe (https://docs.aws.amazon.com/sns/latest/api/API_Subscribe.html)
// Amazon Simple Notification Service (Amazon SNS) API action.
SnsTopicARN *string `type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Notification) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Notification) GoString() string {
return s.String()
}
// SetJobStatesToNotify sets the JobStatesToNotify field's value.
func (s *Notification) SetJobStatesToNotify(v []*string) *Notification {
s.JobStatesToNotify = v
return s
}
// SetNotifyAll sets the NotifyAll field's value.
func (s *Notification) SetNotifyAll(v bool) *Notification {
s.NotifyAll = &v
return s
}
// SetSnsTopicARN sets the SnsTopicARN field's value.
func (s *Notification) SetSnsTopicARN(v string) *Notification {
s.SnsTopicARN = &v
return s
}
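// Example (illustrative sketch): the two notification styles described above.
// The topic ARN is hypothetical.
//
//    // Notify on every job state change:
//    all := (&Notification{}).
//        SetSnsTopicARN("arn:aws:sns:us-east-1:123456789012:snow-job-updates").
//        SetNotifyAll(true)
//
//    // Or notify only on selected states:
//    some := (&Notification{}).
//        SetSnsTopicARN("arn:aws:sns:us-east-1:123456789012:snow-job-updates").
//        SetJobStatesToNotify(aws.StringSlice([]string{JobStateInTransitToCustomer, JobStateComplete}))
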
// An object that represents metadata and configuration settings for services
// on an AWS Snow Family device.
type OnDeviceServiceConfiguration struct {
_ struct{} `type:"structure"`
// Represents the NFS service on a Snow Family device.
NFSOnDeviceService *NFSOnDeviceServiceConfiguration `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s OnDeviceServiceConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s OnDeviceServiceConfiguration) GoString() string {
return s.String()
}
// SetNFSOnDeviceService sets the NFSOnDeviceService field's value.
func (s *OnDeviceServiceConfiguration) SetNFSOnDeviceService(v *NFSOnDeviceServiceConfiguration) *OnDeviceServiceConfiguration {
s.NFSOnDeviceService = v
return s
}
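// Example (illustrative sketch): capping the NFS service on a device through the
// nested configuration objects above. The 28 TB limit is a hypothetical value.
//
//    cfg := (&OnDeviceServiceConfiguration{}).SetNFSOnDeviceService(
//        (&NFSOnDeviceServiceConfiguration{}).
//            SetStorageLimit(28).
//            SetStorageUnit(StorageUnitTb))
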
// You get this exception if you call CreateReturnShippingLabel and a valid
// return shipping label already exists. In this case, use DescribeReturnShippingLabel
// to get the URL.
type ReturnShippingLabelAlreadyExistsException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ReturnShippingLabelAlreadyExistsException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ReturnShippingLabelAlreadyExistsException) GoString() string {
return s.String()
}
func newErrorReturnShippingLabelAlreadyExistsException(v protocol.ResponseMetadata) error {
return &ReturnShippingLabelAlreadyExistsException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *ReturnShippingLabelAlreadyExistsException) Code() string {
return "ReturnShippingLabelAlreadyExistsException"
}
// Message returns the exception's message.
func (s *ReturnShippingLabelAlreadyExistsException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil; it satisfies the awserr.Error interface.
func (s *ReturnShippingLabelAlreadyExistsException) OrigErr() error {
return nil
}
func (s *ReturnShippingLabelAlreadyExistsException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *ReturnShippingLabelAlreadyExistsException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *ReturnShippingLabelAlreadyExistsException) RequestID() string {
return s.RespMetadata.RequestID
}
// Each S3Resource object represents an Amazon S3 bucket that your transferred
// data will be exported from or imported into. For export jobs, this object
// can have an optional KeyRange value. The length of the range is defined at
// job creation, and has either an inclusive BeginMarker, an inclusive EndMarker,
// or both. Ranges are UTF-8 binary sorted.
type S3Resource struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of an Amazon S3 bucket.
BucketArn *string `type:"string"`
// For export jobs, you can provide an optional KeyRange within a specific Amazon
// S3 bucket. The length of the range is defined at job creation, and has either
// an inclusive BeginMarker, an inclusive EndMarker, or both. Ranges are UTF-8
// binary sorted.
KeyRange *KeyRange `type:"structure"`
// Specifies the service or services on the Snow Family device that your transferred
// data will be exported from or imported into. AWS Snow Family supports Amazon
// S3 and NFS (Network File System).
TargetOnDeviceServices []*TargetOnDeviceService `type:"list"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s S3Resource) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s S3Resource) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *S3Resource) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "S3Resource"}
if s.KeyRange != nil {
if err := s.KeyRange.Validate(); err != nil {
invalidParams.AddNested("KeyRange", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBucketArn sets the BucketArn field's value.
func (s *S3Resource) SetBucketArn(v string) *S3Resource {
s.BucketArn = &v
return s
}
// SetKeyRange sets the KeyRange field's value.
func (s *S3Resource) SetKeyRange(v *KeyRange) *S3Resource {
s.KeyRange = v
return s
}
// SetTargetOnDeviceServices sets the TargetOnDeviceServices field's value.
func (s *S3Resource) SetTargetOnDeviceServices(v []*TargetOnDeviceService) *S3Resource {
s.TargetOnDeviceServices = v
return s
}
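// Example (illustrative sketch): an export S3Resource that restricts the export
// to a key range and targets the NFS service on the device. The bucket ARN and
// key prefixes are hypothetical.
//
//    resource := (&S3Resource{}).
//        SetBucketArn("arn:aws:s3:::example-export-bucket").
//        SetKeyRange((&KeyRange{}).SetBeginMarker("photos/2016").SetEndMarker("photos/2017")).
//        SetTargetOnDeviceServices([]*TargetOnDeviceService{
//            (&TargetOnDeviceService{}).
//                SetServiceName(DeviceServiceNameNfsOnDeviceService).
//                SetTransferOption(TransferOptionExport),
//        })
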
// The Status and TrackingNumber information for an inbound or outbound shipment.
type Shipment struct {
_ struct{} `type:"structure"`
// Status information for a shipment.
Status *string `min:"1" type:"string"`
// The tracking number for this job. Using this tracking number with your region's
// carrier's website, you can track a Snow device as the carrier transports
// it.
//
// For India, the carrier is Amazon Logistics. For all other regions, UPS is
// the carrier.
TrackingNumber *string `min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Shipment) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Shipment) GoString() string {
return s.String()
}
// SetStatus sets the Status field's value.
func (s *Shipment) SetStatus(v string) *Shipment {
s.Status = &v
return s
}
// SetTrackingNumber sets the TrackingNumber field's value.
func (s *Shipment) SetTrackingNumber(v string) *Shipment {
s.TrackingNumber = &v
return s
}
// A job's shipping information, including inbound and outbound tracking numbers
// and shipping speed options.
type ShippingDetails struct {
_ struct{} `type:"structure"`
// The Status and TrackingNumber values for a Snow device being returned to
// AWS for a particular job.
InboundShipment *Shipment `type:"structure"`
// The Status and TrackingNumber values for a Snow device being delivered to
// the address that you specified for a particular job.
OutboundShipment *Shipment `type:"structure"`
// The shipping speed for a particular job. This speed doesn't dictate how soon
// you'll get the Snow device from the job's creation date. This speed represents
// how quickly it moves to its destination while in transit. Regional shipping
// speeds are as follows:
//
// * In Australia, you have access to express shipping. Typically, Snow devices
// shipped express are delivered in about a day.
//
// * In the European Union (EU), you have access to express shipping. Typically,
// Snow devices shipped express are delivered in about a day. In addition,
// most countries in the EU have access to standard shipping, which typically
// takes less than a week, one way.
//
// * In India, Snow devices are delivered in one to seven days.
//
// * In the United States of America (US), you have access to one-day shipping
// and two-day shipping.
ShippingOption *string `type:"string" enum:"ShippingOption"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ShippingDetails) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ShippingDetails) GoString() string {
return s.String()
}
// SetInboundShipment sets the InboundShipment field's value.
func (s *ShippingDetails) SetInboundShipment(v *Shipment) *ShippingDetails {
s.InboundShipment = v
return s
}
// SetOutboundShipment sets the OutboundShipment field's value.
func (s *ShippingDetails) SetOutboundShipment(v *Shipment) *ShippingDetails {
s.OutboundShipment = v
return s
}
// SetShippingOption sets the ShippingOption field's value.
func (s *ShippingDetails) SetShippingOption(v string) *ShippingDetails {
s.ShippingOption = &v
return s
}
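// Example (illustrative sketch): reading the outbound tracking number from a
// described job's ShippingDetails, guarding against unset fields. svc and jobID
// are assumed, as in the earlier sketches.
//
//    out, err := svc.DescribeJob(&DescribeJobInput{JobId: aws.String(jobID)})
//    if err != nil {
//        return err
//    }
//    if sd := out.JobMetadata.ShippingDetails; sd != nil && sd.OutboundShipment != nil {
//        fmt.Println("outbound tracking:", aws.StringValue(sd.OutboundShipment.TrackingNumber))
//    }
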
// Specifies the device configuration for an AWS Snowcone job.
type SnowconeDeviceConfiguration struct {
_ struct{} `type:"structure"`
// Configures the wireless connection for the AWS Snowcone device.
WirelessConnection *WirelessConnection `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s SnowconeDeviceConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s SnowconeDeviceConfiguration) GoString() string {
return s.String()
}
// SetWirelessConnection sets the WirelessConnection field's value.
func (s *SnowconeDeviceConfiguration) SetWirelessConnection(v *WirelessConnection) *SnowconeDeviceConfiguration {
s.WirelessConnection = v
return s
}
// An object that represents the service or services on the Snow Family device
// that your transferred data will be exported from or imported into. AWS Snow
// Family supports Amazon S3 and NFS (Network File System).
type TargetOnDeviceService struct {
_ struct{} `type:"structure"`
// Specifies the name of the service on the Snow Family device that your transferred
// data will be exported from or imported into.
ServiceName *string `type:"string" enum:"DeviceServiceName"`
// Specifies whether the data is being imported or exported. You can import
// or export the data, or use it locally on the device.
TransferOption *string `type:"string" enum:"TransferOption"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s TargetOnDeviceService) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s TargetOnDeviceService) GoString() string {
return s.String()
}
// SetServiceName sets the ServiceName field's value.
func (s *TargetOnDeviceService) SetServiceName(v string) *TargetOnDeviceService {
s.ServiceName = &v
return s
}
// SetTransferOption sets the TransferOption field's value.
func (s *TargetOnDeviceService) SetTransferOption(v string) *TargetOnDeviceService {
s.TransferOption = &v
return s
}
// The tax documents required in your AWS Region.
type TaxDocuments struct {
_ struct{} `type:"structure"`
// The tax documents required in AWS Regions in India.
IND *INDTaxDocuments `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s TaxDocuments) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s TaxDocuments) GoString() string {
return s.String()
}
// SetIND sets the IND field's value.
func (s *TaxDocuments) SetIND(v *INDTaxDocuments) *TaxDocuments {
s.IND = v
return s
}
// The address is either outside the serviceable area for your region, or an
// error occurred. Check the address with your region's carrier and try again.
// If the issue persists, contact AWS Support.
type UnsupportedAddressException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"Message" min:"1" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UnsupportedAddressException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UnsupportedAddressException) GoString() string {
return s.String()
}
func newErrorUnsupportedAddressException(v protocol.ResponseMetadata) error {
return &UnsupportedAddressException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *UnsupportedAddressException) Code() string {
return "UnsupportedAddressException"
}
// Message returns the exception's message.
func (s *UnsupportedAddressException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *UnsupportedAddressException) OrigErr() error {
return nil
}
func (s *UnsupportedAddressException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *UnsupportedAddressException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *UnsupportedAddressException) RequestID() string {
return s.RespMetadata.RequestID
}
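// exampleHandleUnsupportedAddress is an illustrative sketch, not part of the
// generated API: since the exception type implements Code, Message,
// StatusCode, and RequestID, callers can branch on it with a plain type
// assertion after a Snowball API call fails.
func exampleHandleUnsupportedAddress(err error) {
	if uae, ok := err.(*UnsupportedAddressException); ok {
		// Log the service diagnostics carried on the error.
		fmt.Printf("%s: %s (HTTP %d, request %s)\n",
			uae.Code(), uae.Message(), uae.StatusCode(), uae.RequestID())
	}
}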
type UpdateClusterInput struct {
_ struct{} `type:"structure"`
// The ID of the updated Address object.
AddressId *string `min:"40" type:"string"`
// The cluster ID of the cluster that you want to update, for example CID123e4567-e89b-12d3-a456-426655440000.
//
// ClusterId is a required field
ClusterId *string `min:"39" type:"string" required:"true"`
// The updated description of this cluster.
Description *string `min:"1" type:"string"`
// The updated ID for the forwarding address for a cluster. This field is not
// supported in most regions.
ForwardingAddressId *string `min:"40" type:"string"`
// The new or updated Notification object.
Notification *Notification `type:"structure"`
// Specifies the service or services on the Snow Family device that your transferred
// data will be exported from or imported into. AWS Snow Family supports Amazon
// S3 and NFS (Network File System).
OnDeviceServiceConfiguration *OnDeviceServiceConfiguration `type:"structure"`
// The updated arrays of JobResource objects that can include updated S3Resource
// objects or LambdaResource objects.
Resources *JobResource `type:"structure"`
// The new role Amazon Resource Name (ARN) that you want to associate with this
// cluster. To create a role ARN, use the CreateRole (https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html)
// API action in AWS Identity and Access Management (IAM).
RoleARN *string `type:"string"`
// The updated shipping option value of this cluster's ShippingDetails object.
ShippingOption *string `type:"string" enum:"ShippingOption"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateClusterInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateClusterInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateClusterInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateClusterInput"}
if s.AddressId != nil && len(*s.AddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("AddressId", 40))
}
if s.ClusterId == nil {
invalidParams.Add(request.NewErrParamRequired("ClusterId"))
}
if s.ClusterId != nil && len(*s.ClusterId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("ClusterId", 39))
}
if s.Description != nil && len(*s.Description) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Description", 1))
}
if s.ForwardingAddressId != nil && len(*s.ForwardingAddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("ForwardingAddressId", 40))
}
if s.Resources != nil {
if err := s.Resources.Validate(); err != nil {
invalidParams.AddNested("Resources", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAddressId sets the AddressId field's value.
func (s *UpdateClusterInput) SetAddressId(v string) *UpdateClusterInput {
s.AddressId = &v
return s
}
// SetClusterId sets the ClusterId field's value.
func (s *UpdateClusterInput) SetClusterId(v string) *UpdateClusterInput {
s.ClusterId = &v
return s
}
// SetDescription sets the Description field's value.
func (s *UpdateClusterInput) SetDescription(v string) *UpdateClusterInput {
s.Description = &v
return s
}
// SetForwardingAddressId sets the ForwardingAddressId field's value.
func (s *UpdateClusterInput) SetForwardingAddressId(v string) *UpdateClusterInput {
s.ForwardingAddressId = &v
return s
}
// SetNotification sets the Notification field's value.
func (s *UpdateClusterInput) SetNotification(v *Notification) *UpdateClusterInput {
s.Notification = v
return s
}
// SetOnDeviceServiceConfiguration sets the OnDeviceServiceConfiguration field's value.
func (s *UpdateClusterInput) SetOnDeviceServiceConfiguration(v *OnDeviceServiceConfiguration) *UpdateClusterInput {
s.OnDeviceServiceConfiguration = v
return s
}
// SetResources sets the Resources field's value.
func (s *UpdateClusterInput) SetResources(v *JobResource) *UpdateClusterInput {
s.Resources = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *UpdateClusterInput) SetRoleARN(v string) *UpdateClusterInput {
s.RoleARN = &v
return s
}
// SetShippingOption sets the ShippingOption field's value.
func (s *UpdateClusterInput) SetShippingOption(v string) *UpdateClusterInput {
s.ShippingOption = &v
return s
}
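// exampleUpdateCluster is an illustrative sketch, not generated code: every
// Set* method returns its receiver, so construction chains fluently, and
// Validate() enforces the same required/MinLen rules encoded in the struct
// tags before any request is made. The cluster ID is a placeholder in the
// documented CID... format (39 characters).
func exampleUpdateCluster() error {
	input := (&UpdateClusterInput{}).
		SetClusterId("CID123e4567-e89b-12d3-a456-426655440000").
		SetDescription("nightly sync cluster").
		SetShippingOption(ShippingOptionSecondDay)
	// A too-short ClusterId or a missing required field would surface here
	// as request.ErrInvalidParams.
	return input.Validate()
}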
type UpdateClusterOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateClusterOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateClusterOutput) GoString() string {
return s.String()
}
type UpdateJobInput struct {
_ struct{} `type:"structure"`
// The ID of the updated Address object.
AddressId *string `min:"40" type:"string"`
// The updated description of this job's JobMetadata object.
Description *string `min:"1" type:"string"`
// The updated ID for the forwarding address for a job. This field is not supported
// in most regions.
ForwardingAddressId *string `min:"40" type:"string"`
// The job ID of the job that you want to update, for example JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
// The new or updated Notification object.
Notification *Notification `type:"structure"`
// Specifies the service or services on the Snow Family device that your transferred
// data will be exported from or imported into. AWS Snow Family supports Amazon
// S3 and NFS (Network File System).
OnDeviceServiceConfiguration *OnDeviceServiceConfiguration `type:"structure"`
	// The updated JobResource object, which can include updated S3Resource
	// objects or LambdaResource objects.
Resources *JobResource `type:"structure"`
	// The new role Amazon Resource Name (ARN) that you want to associate with this
	// job. To create a role ARN, use the CreateRole (https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateRole.html)
	// AWS Identity and Access Management (IAM) API action.
RoleARN *string `type:"string"`
// The updated shipping option value of this job's ShippingDetails object.
ShippingOption *string `type:"string" enum:"ShippingOption"`
// The updated SnowballCapacityPreference of this job's JobMetadata object.
// The 50 TB Snowballs are only available in the US regions.
//
	// For more information, see "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html"
	// (Snow Family Devices and Capacity) in the Snowcone User Guide or "https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html"
	// (Snow Family Devices and Capacity) in the Snowball Edge Developer Guide.
SnowballCapacityPreference *string `type:"string" enum:"Capacity"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateJobInput"}
if s.AddressId != nil && len(*s.AddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("AddressId", 40))
}
if s.Description != nil && len(*s.Description) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Description", 1))
}
if s.ForwardingAddressId != nil && len(*s.ForwardingAddressId) < 40 {
invalidParams.Add(request.NewErrParamMinLen("ForwardingAddressId", 40))
}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if s.Resources != nil {
if err := s.Resources.Validate(); err != nil {
invalidParams.AddNested("Resources", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAddressId sets the AddressId field's value.
func (s *UpdateJobInput) SetAddressId(v string) *UpdateJobInput {
s.AddressId = &v
return s
}
// SetDescription sets the Description field's value.
func (s *UpdateJobInput) SetDescription(v string) *UpdateJobInput {
s.Description = &v
return s
}
// SetForwardingAddressId sets the ForwardingAddressId field's value.
func (s *UpdateJobInput) SetForwardingAddressId(v string) *UpdateJobInput {
s.ForwardingAddressId = &v
return s
}
// SetJobId sets the JobId field's value.
func (s *UpdateJobInput) SetJobId(v string) *UpdateJobInput {
s.JobId = &v
return s
}
// SetNotification sets the Notification field's value.
func (s *UpdateJobInput) SetNotification(v *Notification) *UpdateJobInput {
s.Notification = v
return s
}
// SetOnDeviceServiceConfiguration sets the OnDeviceServiceConfiguration field's value.
func (s *UpdateJobInput) SetOnDeviceServiceConfiguration(v *OnDeviceServiceConfiguration) *UpdateJobInput {
s.OnDeviceServiceConfiguration = v
return s
}
// SetResources sets the Resources field's value.
func (s *UpdateJobInput) SetResources(v *JobResource) *UpdateJobInput {
s.Resources = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *UpdateJobInput) SetRoleARN(v string) *UpdateJobInput {
s.RoleARN = &v
return s
}
// SetShippingOption sets the ShippingOption field's value.
func (s *UpdateJobInput) SetShippingOption(v string) *UpdateJobInput {
s.ShippingOption = &v
return s
}
// SetSnowballCapacityPreference sets the SnowballCapacityPreference field's value.
func (s *UpdateJobInput) SetSnowballCapacityPreference(v string) *UpdateJobInput {
s.SnowballCapacityPreference = &v
return s
}
type UpdateJobOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateJobOutput) GoString() string {
return s.String()
}
type UpdateJobShipmentStateInput struct {
_ struct{} `type:"structure"`
// The job ID of the job whose shipment date you want to update, for example
// JID123e4567-e89b-12d3-a456-426655440000.
//
// JobId is a required field
JobId *string `min:"39" type:"string" required:"true"`
// The state of a device when it is being shipped.
//
// Set to RECEIVED when the device arrives at your location.
//
// Set to RETURNED when you have returned the device to AWS.
//
// ShipmentState is a required field
ShipmentState *string `type:"string" required:"true" enum:"ShipmentState"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateJobShipmentStateInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateJobShipmentStateInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateJobShipmentStateInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateJobShipmentStateInput"}
if s.JobId == nil {
invalidParams.Add(request.NewErrParamRequired("JobId"))
}
if s.JobId != nil && len(*s.JobId) < 39 {
invalidParams.Add(request.NewErrParamMinLen("JobId", 39))
}
if s.ShipmentState == nil {
invalidParams.Add(request.NewErrParamRequired("ShipmentState"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetJobId sets the JobId field's value.
func (s *UpdateJobShipmentStateInput) SetJobId(v string) *UpdateJobShipmentStateInput {
s.JobId = &v
return s
}
// SetShipmentState sets the ShipmentState field's value.
func (s *UpdateJobShipmentStateInput) SetShipmentState(v string) *UpdateJobShipmentStateInput {
s.ShipmentState = &v
return s
}
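// exampleMarkReceived is an illustrative sketch: it builds the input for
// UpdateJobShipmentState using the generated ShipmentState enum constant
// rather than a raw string. The job ID is a placeholder in the documented
// JID... format.
func exampleMarkReceived() (*UpdateJobShipmentStateInput, error) {
	input := (&UpdateJobShipmentStateInput{}).
		SetJobId("JID123e4567-e89b-12d3-a456-426655440000").
		SetShipmentState(ShipmentStateReceived)
	return input, input.Validate()
}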
type UpdateJobShipmentStateOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateJobShipmentStateOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateJobShipmentStateOutput) GoString() string {
return s.String()
}
type UpdateLongTermPricingInput struct {
_ struct{} `type:"structure"`
// If set to true, specifies that the current long-term pricing type for the
// device should be automatically renewed before the long-term pricing contract
// expires.
IsLongTermPricingAutoRenew *bool `type:"boolean"`
// The ID of the long-term pricing type for the device.
//
// LongTermPricingId is a required field
LongTermPricingId *string `min:"41" type:"string" required:"true"`
// Specifies that a device that is ordered with long-term pricing should be
// replaced with a new device.
ReplacementJob *string `min:"39" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateLongTermPricingInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateLongTermPricingInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateLongTermPricingInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateLongTermPricingInput"}
if s.LongTermPricingId == nil {
invalidParams.Add(request.NewErrParamRequired("LongTermPricingId"))
}
if s.LongTermPricingId != nil && len(*s.LongTermPricingId) < 41 {
invalidParams.Add(request.NewErrParamMinLen("LongTermPricingId", 41))
}
if s.ReplacementJob != nil && len(*s.ReplacementJob) < 39 {
invalidParams.Add(request.NewErrParamMinLen("ReplacementJob", 39))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetIsLongTermPricingAutoRenew sets the IsLongTermPricingAutoRenew field's value.
func (s *UpdateLongTermPricingInput) SetIsLongTermPricingAutoRenew(v bool) *UpdateLongTermPricingInput {
s.IsLongTermPricingAutoRenew = &v
return s
}
// SetLongTermPricingId sets the LongTermPricingId field's value.
func (s *UpdateLongTermPricingInput) SetLongTermPricingId(v string) *UpdateLongTermPricingInput {
s.LongTermPricingId = &v
return s
}
// SetReplacementJob sets the ReplacementJob field's value.
func (s *UpdateLongTermPricingInput) SetReplacementJob(v string) *UpdateLongTermPricingInput {
s.ReplacementJob = &v
return s
}
type UpdateLongTermPricingOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateLongTermPricingOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UpdateLongTermPricingOutput) GoString() string {
return s.String()
}
// Configures the wireless connection on an AWS Snowcone device.
type WirelessConnection struct {
_ struct{} `type:"structure"`
// Enables the Wi-Fi adapter on an AWS Snowcone device.
IsWifiEnabled *bool `type:"boolean"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s WirelessConnection) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s WirelessConnection) GoString() string {
return s.String()
}
// SetIsWifiEnabled sets the IsWifiEnabled field's value.
func (s *WirelessConnection) SetIsWifiEnabled(v bool) *WirelessConnection {
s.IsWifiEnabled = &v
return s
}
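// exampleSnowconeWifi is an illustrative sketch: WirelessConnection is only
// consumed through SnowconeDeviceConfiguration, so enabling Wi-Fi on a
// Snowcone amounts to nesting one configured value inside the other.
func exampleSnowconeWifi() *SnowconeDeviceConfiguration {
	wc := (&WirelessConnection{}).SetIsWifiEnabled(true)
	return (&SnowconeDeviceConfiguration{}).SetWirelessConnection(wc)
}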
const (
// CapacityT50 is a Capacity enum value
CapacityT50 = "T50"
// CapacityT80 is a Capacity enum value
CapacityT80 = "T80"
// CapacityT100 is a Capacity enum value
CapacityT100 = "T100"
// CapacityT42 is a Capacity enum value
CapacityT42 = "T42"
// CapacityT98 is a Capacity enum value
CapacityT98 = "T98"
// CapacityT8 is a Capacity enum value
CapacityT8 = "T8"
// CapacityT14 is a Capacity enum value
CapacityT14 = "T14"
// CapacityNoPreference is a Capacity enum value
CapacityNoPreference = "NoPreference"
)
// Capacity_Values returns all elements of the Capacity enum
func Capacity_Values() []string {
return []string{
CapacityT50,
CapacityT80,
CapacityT100,
CapacityT42,
CapacityT98,
CapacityT8,
CapacityT14,
CapacityNoPreference,
}
}
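// exampleIsValidCapacity is an illustrative sketch: the generated *_Values
// helpers enumerate the accepted enum strings, which makes pre-flight
// validation of user-supplied values a simple membership check.
func exampleIsValidCapacity(v string) bool {
	for _, c := range Capacity_Values() {
		if c == v {
			return true
		}
	}
	return false
}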
const (
// ClusterStateAwaitingQuorum is a ClusterState enum value
ClusterStateAwaitingQuorum = "AwaitingQuorum"
// ClusterStatePending is a ClusterState enum value
ClusterStatePending = "Pending"
// ClusterStateInUse is a ClusterState enum value
ClusterStateInUse = "InUse"
// ClusterStateComplete is a ClusterState enum value
ClusterStateComplete = "Complete"
// ClusterStateCancelled is a ClusterState enum value
ClusterStateCancelled = "Cancelled"
)
// ClusterState_Values returns all elements of the ClusterState enum
func ClusterState_Values() []string {
return []string{
ClusterStateAwaitingQuorum,
ClusterStatePending,
ClusterStateInUse,
ClusterStateComplete,
ClusterStateCancelled,
}
}
const (
// DeviceServiceNameNfsOnDeviceService is a DeviceServiceName enum value
DeviceServiceNameNfsOnDeviceService = "NFS_ON_DEVICE_SERVICE"
// DeviceServiceNameS3OnDeviceService is a DeviceServiceName enum value
DeviceServiceNameS3OnDeviceService = "S3_ON_DEVICE_SERVICE"
)
// DeviceServiceName_Values returns all elements of the DeviceServiceName enum
func DeviceServiceName_Values() []string {
return []string{
DeviceServiceNameNfsOnDeviceService,
DeviceServiceNameS3OnDeviceService,
}
}
const (
// JobStateNew is a JobState enum value
JobStateNew = "New"
// JobStatePreparingAppliance is a JobState enum value
JobStatePreparingAppliance = "PreparingAppliance"
// JobStatePreparingShipment is a JobState enum value
JobStatePreparingShipment = "PreparingShipment"
// JobStateInTransitToCustomer is a JobState enum value
JobStateInTransitToCustomer = "InTransitToCustomer"
// JobStateWithCustomer is a JobState enum value
JobStateWithCustomer = "WithCustomer"
// JobStateInTransitToAws is a JobState enum value
JobStateInTransitToAws = "InTransitToAWS"
// JobStateWithAwssortingFacility is a JobState enum value
JobStateWithAwssortingFacility = "WithAWSSortingFacility"
// JobStateWithAws is a JobState enum value
JobStateWithAws = "WithAWS"
// JobStateInProgress is a JobState enum value
JobStateInProgress = "InProgress"
// JobStateComplete is a JobState enum value
JobStateComplete = "Complete"
// JobStateCancelled is a JobState enum value
JobStateCancelled = "Cancelled"
// JobStateListing is a JobState enum value
JobStateListing = "Listing"
// JobStatePending is a JobState enum value
JobStatePending = "Pending"
)
// JobState_Values returns all elements of the JobState enum
func JobState_Values() []string {
return []string{
JobStateNew,
JobStatePreparingAppliance,
JobStatePreparingShipment,
JobStateInTransitToCustomer,
JobStateWithCustomer,
JobStateInTransitToAws,
JobStateWithAwssortingFacility,
JobStateWithAws,
JobStateInProgress,
JobStateComplete,
JobStateCancelled,
JobStateListing,
JobStatePending,
}
}
const (
// JobTypeImport is a JobType enum value
JobTypeImport = "IMPORT"
// JobTypeExport is a JobType enum value
JobTypeExport = "EXPORT"
// JobTypeLocalUse is a JobType enum value
JobTypeLocalUse = "LOCAL_USE"
)
// JobType_Values returns all elements of the JobType enum
func JobType_Values() []string {
return []string{
JobTypeImport,
JobTypeExport,
JobTypeLocalUse,
}
}
const (
// LongTermPricingTypeOneYear is a LongTermPricingType enum value
LongTermPricingTypeOneYear = "OneYear"
// LongTermPricingTypeThreeYear is a LongTermPricingType enum value
LongTermPricingTypeThreeYear = "ThreeYear"
)
// LongTermPricingType_Values returns all elements of the LongTermPricingType enum
func LongTermPricingType_Values() []string {
return []string{
LongTermPricingTypeOneYear,
LongTermPricingTypeThreeYear,
}
}
const (
// RemoteManagementInstalledOnly is a RemoteManagement enum value
RemoteManagementInstalledOnly = "INSTALLED_ONLY"
// RemoteManagementInstalledAutostart is a RemoteManagement enum value
RemoteManagementInstalledAutostart = "INSTALLED_AUTOSTART"
)
// RemoteManagement_Values returns all elements of the RemoteManagement enum
func RemoteManagement_Values() []string {
return []string{
RemoteManagementInstalledOnly,
RemoteManagementInstalledAutostart,
}
}
const (
// ShipmentStateReceived is a ShipmentState enum value
ShipmentStateReceived = "RECEIVED"
// ShipmentStateReturned is a ShipmentState enum value
ShipmentStateReturned = "RETURNED"
)
// ShipmentState_Values returns all elements of the ShipmentState enum
func ShipmentState_Values() []string {
return []string{
ShipmentStateReceived,
ShipmentStateReturned,
}
}
const (
// ShippingLabelStatusInProgress is a ShippingLabelStatus enum value
ShippingLabelStatusInProgress = "InProgress"
// ShippingLabelStatusTimedOut is a ShippingLabelStatus enum value
ShippingLabelStatusTimedOut = "TimedOut"
// ShippingLabelStatusSucceeded is a ShippingLabelStatus enum value
ShippingLabelStatusSucceeded = "Succeeded"
// ShippingLabelStatusFailed is a ShippingLabelStatus enum value
ShippingLabelStatusFailed = "Failed"
)
// ShippingLabelStatus_Values returns all elements of the ShippingLabelStatus enum
func ShippingLabelStatus_Values() []string {
return []string{
ShippingLabelStatusInProgress,
ShippingLabelStatusTimedOut,
ShippingLabelStatusSucceeded,
ShippingLabelStatusFailed,
}
}
const (
// ShippingOptionSecondDay is a ShippingOption enum value
ShippingOptionSecondDay = "SECOND_DAY"
// ShippingOptionNextDay is a ShippingOption enum value
ShippingOptionNextDay = "NEXT_DAY"
// ShippingOptionExpress is a ShippingOption enum value
ShippingOptionExpress = "EXPRESS"
// ShippingOptionStandard is a ShippingOption enum value
ShippingOptionStandard = "STANDARD"
)
// ShippingOption_Values returns all elements of the ShippingOption enum
func ShippingOption_Values() []string {
return []string{
ShippingOptionSecondDay,
ShippingOptionNextDay,
ShippingOptionExpress,
ShippingOptionStandard,
}
}
const (
// StorageUnitTb is a StorageUnit enum value
StorageUnitTb = "TB"
)
// StorageUnit_Values returns all elements of the StorageUnit enum
func StorageUnit_Values() []string {
return []string{
StorageUnitTb,
}
}
const (
// TransferOptionImport is a TransferOption enum value
TransferOptionImport = "IMPORT"
// TransferOptionExport is a TransferOption enum value
TransferOptionExport = "EXPORT"
// TransferOptionLocalUse is a TransferOption enum value
TransferOptionLocalUse = "LOCAL_USE"
)
// TransferOption_Values returns all elements of the TransferOption enum
func TransferOption_Values() []string {
return []string{
TransferOptionImport,
TransferOptionExport,
TransferOptionLocalUse,
}
}
const (
// TypeStandard is a Type enum value
TypeStandard = "STANDARD"
// TypeEdge is a Type enum value
TypeEdge = "EDGE"
// TypeEdgeC is a Type enum value
TypeEdgeC = "EDGE_C"
// TypeEdgeCg is a Type enum value
TypeEdgeCg = "EDGE_CG"
// TypeEdgeS is a Type enum value
TypeEdgeS = "EDGE_S"
// TypeSnc1Hdd is a Type enum value
TypeSnc1Hdd = "SNC1_HDD"
// TypeSnc1Ssd is a Type enum value
TypeSnc1Ssd = "SNC1_SSD"
)
// Type_Values returns all elements of the Type enum
func Type_Values() []string {
return []string{
TypeStandard,
TypeEdge,
TypeEdgeC,
TypeEdgeCg,
TypeEdgeS,
TypeSnc1Hdd,
TypeSnc1Ssd,
}
}
|
non_distributable_flags.go
|
// Copyright 2020 VMware, Inc.
// SPDX-License-Identifier: Apache-2.0
package cmd
import "github.com/spf13/cobra"
type IncludeNonDistributableFlag struct {
	IncludeNonDistributable bool
}
func (i *IncludeNonDistributableFlag) Set(cmd *cobra.Command) {
	cmd.Flags().BoolVar(&i.IncludeNonDistributable, "include-non-distributable-layers", false,
		"Include non-distributable layers when copying an image/bundle")
}
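// exampleWireFlag is an illustrative sketch with a hypothetical command: the
// flag struct registers itself on a cobra command, and once Execute() has
// parsed the CLI arguments the bound field reflects
// --include-non-distributable-layers.
func exampleWireFlag() *cobra.Command {
	copyCmd := &cobra.Command{Use: "copy"}
	flag := &IncludeNonDistributableFlag{}
	flag.Set(copyCmd)
	// flag.IncludeNonDistributable is populated after copyCmd.Execute() runs.
	return copyCmd
}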
|
openWindAcComponent.py
|
# openWindAcComponent.py
# 2014 04 08
'''
Execute OpenWindAcademic as an OpenMDAO Component
After execute(), the following variables have been updated:
nTurbs
net_aep
gross_aep
They can be accessed through appropriate connections.
NOTE: Script file must contain an Optimize/Optimise operation - otherwise,
no results will be found.
Typical use (e.g., from an Assembly):
owac = OWACcomp(owExe, scriptFile=scrptName, debug=False, stopOW=True, start_once=False, opt_log=False)
main() runs OWACcomp.execute() 3 times, moving and modifying the turbines each time
'''
import os.path
import sys, time
import subprocess
from lxml import etree
import owAcademicUtils as acutils
import plant_energyse.openwind.openWindUtils as utils
import plant_energyse.openwind.rwScriptXML as rwScriptXML
import plant_energyse.openwind.rwTurbXML as rwTurbXML
import plant_energyse.openwind.turbfuncs as turbfuncs
from openmdao.lib.datatypes.api import Float, Int, VarTree
from openmdao.main.api import FileMetadata, Component, VariableTree
from fusedwind.plant_flow.vt import GenericWindTurbineVT, \
GenericWindTurbinePowerCurveVT, ExtendedWindTurbinePowerCurveVT, \
GenericWindFarmTurbineLayout, ExtendedWindFarmTurbineLayout
from fusedwind.interface import implement_base
from fusedwind.plant_flow.comp import BaseAEPAggregator
#-----------------------------------------------------------------
@implement_base(BaseAEPAggregator)
class OWACcomp(Component):
""" A simple OpenMDAO component for OpenWind academic
Args:
owExe (str): full path to OpenWind executable
scriptFile (str): path to XML script that OpenWind will run
"""
# inputs
rotor_diameter = Float(126.0, iotype='in', units='m', desc='connecting rotor diameter to force run on change') # todo: hack for now
availability = Float(0.95, iotype='in', desc='availability')
other_losses = Float(0.0, iotype='in', desc='soiling losses')
wt_layout = VarTree(ExtendedWindFarmTurbineLayout(), iotype='in', desc='properties for each wind turbine and layout')
dummyVbl = Float(0, iotype='in', desc='unused variable to make it easy to do DOE runs')
# outputs
gross_aep = Float(0.0, iotype='out', desc='Gross Output')
net_aep = Float(0.0, iotype='out', desc='Net Output')
nTurbs = Int(0, iotype='out', desc='Number of turbines')
#array_aep = Float(0.0, iotype='out', desc='Array output - NOT USED IN ACADEMIC VERSION')
#array_efficiency = Float(0.0, iotype='out', desc='Array Efficiency')
#array_losses = Float(0.0, iotype='out', desc='Array losses')
def __init__(self, owExe, scriptFile=None, debug=False, stopOW=True, start_once=False, opt_log=False):
""" Constructor for the OWACwrapped component """
self.debug = debug
if self.debug:
sys.stderr.write('\nIn {:}.__init__()\n'.format(self.__class__))
super(OWACcomp, self).__init__()
# public variables
self.input_file = 'myinput.txt'
self.output_file = 'myoutput.txt'
self.stderr = 'myerror.log'
# external_files : member of Component class = list of FileMetadata objects
self.external_files = [
FileMetadata(path=self.output_file),
FileMetadata(path=self.stderr),
]
self.stopOW = stopOW
self.start_once = start_once
self.replace_turbine = False
self.opt_log = opt_log
self.resname = '' # start with empty string
self.script_file = scriptFile
self.scriptOK = False
if scriptFile is not None:
# Check script file for validity and extract some path information
self.scriptOK = self.parse_scriptFile()
            if not self.scriptOK:
                raise ValueError('invalid OpenWind script file: {:}'.format(scriptFile))
self.scriptDict = rwScriptXML.rdScript(self.script_file, self.debug)
if self.debug:
sys.stderr.write('Script File Contents:\n')
for k in self.scriptDict.keys():
sys.stderr.write(' {:12s} {:}\n'.format(k,self.scriptDict[k]))
# Log all optimization settings?
if self.opt_log:
self.olname = 'owacOptLog.txt'
self.olfh = open(self.olname, 'w')
if self.debug:
sys.stderr.write('Logging optimization params to {:}\n'.format(self.olname))
# Set the version of OpenWind that we want to use
self.command = [owExe, self.script_file]
# Keep the initial value of rotor diam so we can
# see if it (or other turb param) has changed
self.rtr_diam_init = self.rotor_diameter
# ... other params ....
# Try starting OpenWind here (if self.start_once is True)
if self.start_once:
self.proc = subprocess.Popen(self.command)
self.pid = self.proc.pid
if self.debug:
sys.stderr.write('Started OpenWind with pid {:}\n'.format(self.pid))
sys.stderr.write(' OWACComp: dummyVbl {:}\n'.format(self.dummyVbl))
if self.debug:
sys.stderr.write('\nLeaving {:}.__init__()\n'.format(self.__class__))
#------------------
def parse_scriptFile(self):
# OWac looks for the notify files and writes the 'results.txt' file to the
# directory that contains the *blb workbook file
# Find where the results file will be found
if not os.path.isfile(self.script_file):
sys.stderr.write('\n*** OpenWind script file "{:}" not found\n'.format(self.script_file))
return False
try:
e = etree.parse(self.script_file)
self.rptpath = e.getroot().find('ReportPath').get('value')
        except Exception:
sys.stderr.write("\n*** Can't find ReportPath in {:}\n".format(self.script_file))
self.rptpath = 'NotFound'
return False
# Make sure there's an optimize operation - otherwise OWAC won't find anything
foundOpt = False
self.replace_turbine = False
ops = e.getroot().findall('.//Operation')
for op in ops:
optype = op.find('Type').get('value')
if optype == 'Optimize' or optype == 'Optimise':
foundOpt = True
break
if optype == 'Replace Turbine Type':
self.replace_turbine = True
sys.stderr.write('\n*** WARNING: start_once will be set to False because Replace Turbine\n')
sys.stderr.write(' operation is present in {:}\n\n'.format(self.script_file))
self.start_once = False
if not foundOpt:
sys.stderr.write('\n*** ERROR: no Optimize operation found in {:}\n\n'.format(self.script_file))
return False
if self.replace_turbine and self.start_once:
sys.stderr.write("*** WARNING: can't use start_once when replacing turbine\n")
sys.stderr.write(" setting start_once to False\n")
self.start_once = False
# Find the workbook folder and save as dname
self.dname = None
for op in ops:
if op.find('Type').get('value') == 'Change Workbook':
wkbk = op.find('Path').get('value')
if not os.path.isfile(wkbk):
sys.stderr.write("\n*** OpenWind workbook file {:}\n not found\n".format(wkbk))
sys.stderr.write(" (specified in script file {:})\n".format(self.script_file))
return False
self.dname = os.path.dirname(wkbk)
if self.debug:
sys.stderr.write('Working directory: {:}\n'.format(self.dname))
break
self.resname = '/'.join([self.dname,'results.txt'])
return True
#------------------
def execute(self):
""" Executes our component. """
if self.debug:
sys.stderr.write(" In {0}.execute() {1}...\n".format(self.__class__, self.script_file))
if (len(self.resname) < 1):
            sys.stderr.write('\n*** ERROR: OWACcomp results file name not assigned! (problem with script file?)\n\n')
return False
# Prepare input file here
# - write a new script file?
# - write a new turbine file to overwrite the one referenced
# in the existing script_file?
# If there is a turbine replacement operation in the script:
# write new turbine description file based on contents of first turbine in layout
#if 'replturbpath' in self.scriptDict:
if self.replace_turbine:
if len(self.wt_layout.wt_list) < 1:
sys.stderr.write('\n*** ERROR *** OWACcomp::execute(): no turbines in wt_layout!\n\n')
return False
if self.debug:
sys.stderr.write('Replacement turbine parameters:\n')
#sys.stderr.write('{:}\n'.format(turbfuncs.wtpc_dump(self.wt_layout.wt_list[0])))
sys.stderr.write('{:}\n'.format(turbfuncs.wtpc_dump(self.wt_layout.wt_list[0], shortFmt=True)))
#sys.stderr.write('{:}\n'.format(wtlDump(self.wt_layout.wt_list[0])))
newXML = turbfuncs.wtpc_to_owtg(self.wt_layout.wt_list[0],
trbname='ReplTurb',
desc='OWACcomp replacement turbine')
if len(newXML) > 50:
tfname = self.scriptDict['replturbpath'] # this is the file that will be overwritten with new turbine parameters
tfh = open(tfname, 'w')
tfh.write(newXML)
tfh.close()
maxPower = self.wt_layout.wt_list[0].power_rating
if self.debug:
                    sys.stderr.write('Wrote new turbine file to {:} (rated pwr {:.2f} MW)\n'.format(tfname, maxPower*0.000001))
else:
sys.stderr.write('*** NO new turbine file written\n')
# Execute the component and save process ID
if not self.start_once:
self.proc = subprocess.Popen(self.command)
self.pid = self.proc.pid
if self.debug:
sys.stderr.write('Started OpenWind with pid {:}\n'.format(self.pid))
sys.stderr.write(' OWACComp: dummyVbl {:}\n'.format(self.dummyVbl))
#sys.stderr.write('Report Path: {:}\n'.format(self.rptpath))
# Watch for 'results.txt', meaning that OW has run once with the default locations
if self.debug:
sys.stderr.write('OWACComp waiting for {:} (first run - positions unchanged)\n'.format(self.resname))
acutils.waitForNotify(watchFile=self.resname, path=self.dname, debug=False, callback=self.getCBvalue)
# Now OWac is waiting for a new position file
# Write new positions and notify file - this time it should use updated positions
acutils.writePositionFile(self.wt_layout.wt_positions, path=self.dname, debug=self.debug)
        # see if results.txt is there already
        resmtime = 0  # modification time of existing results file (0 if absent)
        if os.path.exists(self.resname):
            resmtime = os.path.getmtime(self.resname)
            if self.debug:
                sys.stderr.write('ModTime({:}): {:}\n'.format(self.resname, time.asctime(time.localtime(resmtime))))
        else:
            if self.debug:
                sys.stderr.write('{:} does not exist yet\n'.format(self.resname))
acutils.writeNotify(path=self.dname, debug=self.debug) # tell OW that we're ready for the next (only) iteration
# 'results.txt' is in the same directory as the *blb file
if os.path.exists(self.resname):
resNewmtime = os.path.getmtime(self.resname)
if resNewmtime > resmtime: # file has changed
if self.debug:
                    sys.stderr.write('results.txt already updated\n')
else:
acutils.waitForNotify(watchFile=self.resname, path=self.dname, callback=self.getCBvalue, debug=self.debug)
else:
if self.debug:
sys.stderr.write('OWACComp waiting for {:} (modified positions)\n'.format(self.resname))
acutils.waitForNotify(watchFile=self.resname, path=self.dname, callback=self.getCBvalue, debug=self.debug)
# Parse output file
# Enterprise OW writes the report file specified in the script BUT
# Academic OW writes 'results.txt' (which doesn't have as much information)
netEnergy, netNRGturb, grossNRGturb = acutils.parseACresults(fname=self.resname)
if netEnergy is None:
sys.stderr.write("Error reading results file\n")
if self.debug:
sys.stderr.write('Stopping OpenWind with pid {:}\n'.format(self.pid))
self.proc.terminate()
return False
# Set the output variables
# - array_aep is not available from Academic 'results.txt' file
self.nTurbs = len(netNRGturb)
self.net_aep = netEnergy
self.gross_aep = sum(grossNRGturb)
if self.debug:
sys.stderr.write('{:}\n'.format(self.dump()))
# Log optimization values
if self.opt_log:
self.olfh.write('{:3d} G {:.4f} N {:.4f} XY '.format(self.exec_count, self.gross_aep, self.net_aep))
            for ii in range(len(self.wt_layout.wt_positions)):
self.olfh.write('{:8.1f} {:9.1f} '.format(self.wt_layout.wt_positions[ii][0], self.wt_layout.wt_positions[ii][1]))
self.olfh.write('\n')
if not self.start_once and self.stopOW:
if self.debug:
sys.stderr.write('Stopping OpenWind with pid {:}\n'.format(self.pid))
self.proc.terminate()
self.checkReport() # check for execution errors
if self.debug:
sys.stderr.write(" Leaving {0}.execute() {1}...\n".format(self.__class__, self.script_file))
#------------------
def dump(self):
# returns a string with a summary of object parameters
        dumpstr = ''
        dumpstr += 'Gross {:10.4f} GWh Net {:10.4f} GWh from {:4d} turbines'.format(
            self.gross_aep*0.000001, self.net_aep*0.000001, self.nTurbs)
        return dumpstr
#------------------
def getCBvalue(self,val):
''' Callback invoked when waitForNotify detects change in results file
Sets self.net_aep to its argument
waitForNotify has handler which reads results.txt and calls this
function with netEnergy
Is this redundant with other parser for results.txt?
'''
self.net_aep = val
#------------------
def terminateOW(self):
''' Terminate the OpenWind process '''
if self.debug:
sys.stderr.write('Stopping OpenWind with pid {:}\n'.format(self.pid))
self.proc.terminate()
#------------------
def checkReport(self):
''' check the report file for errors '''
fname = self.scriptDict['rptpath']
if self.debug:
sys.stderr.write('checkReport : {:}\n'.format(fname))
fh = open(fname, 'r')
for line in fh.readlines():
if line.startswith('Failed to find and replace turbine type'):
sys.stderr.write('\n*** ERROR: turbine replacement operation failed\n')
sys.stderr.write(' Replace {:}\n'.format(self.scriptDict['replturbname']))
sys.stderr.write(' with {:}\n'.format(self.scriptDict['replturbpath']))
sys.stderr.write('\n')
fh.close()
#------------------------------------------------------------------
def dummy_wt_list():
wtl = ExtendedWindTurbinePowerCurveVT()
nv = 20
wtl.hub_height = 100.0
wtl.rotor_diameter = 90.0
wtl.power_rating = 3.0
wtl.rpm_curve = [ [float(i), 10.0] for i in range(nv) ]
wtl.pitch_curve = [ [float(i), 0.0] for i in range(nv) ]
wtl.c_t_curve = [ [float(i), 10.0] for i in range(nv) ]
wtl.power_curve = [ [float(i), 10.0] for i in range(nv) ]
return wtl
#------------------------------------------------------------------
def wtlDump(wtl):
wstr = 'WTL: pclen {:}'.format(len(wtl.c_t_curve))
return wstr
#------------------------------------------------------------------
''' OWACcomp.wt_layout is an ExtendedWindFarmTurbineLayout(VariableTree) and has
wt_list = List(ExtendedWindTurbinePowerCurveVT(), desc='The wind turbine list of descriptions [n_wt]')
wt_positions = Array(units='m', desc='Array of wind turbines attached to particular positions [n_wt, 2]')
(among others)
We use wt_positions to move the turbines - we update the values and copy them to
file 'positions.txt' at each iteration using writePositionFile()
(ow.wt_layout.wt_positions and wt_positions are 2 copies of the same data)
If we are replacing the turbines, we use wt_list to hold the modified turbine.
We initialize wt_layout.wt_list with copies of the values in base_turbine_file.
At each iteration, we tweak the values in wt_layout.wt_list.
When OWACComp.execute runs, it writes a new turbine file
using the values in wt_layout.wt_list[0]
This turbine file is the same one specified in the script:
<TurbinePath value="../templates/ReplTurb.owtg"/>
When OpenWind runs the Replace Turbine operation, it looks for all turbines whose name matches
the value in <TurbineName value="NREL 5MW"/> and replaces them with the turbine described in
file <TurbinePath>
Does the base_turbine_file need to match the default turbine in the workbook?
How can we get that name?
- run OW energy capture, scan file
- but scripted energy capture doesn't have full description of turbine
'''
def example(owExe):
debug = False
start_once = False
modify_turbine = False
opt_log = False
for arg in sys.argv[1:]:
if arg == '-debug':
debug = True
if arg == '-once':
start_once = True
if arg == '-log':
opt_log = True
if arg == '-modturb':
modify_turbine = True
if arg == '-help':
sys.stderr.write('USAGE: python openWindAcComponent.py [-once] [-debug]\n')
exit()
# Find OpenWind executable
if not os.path.isfile(owExe):
sys.stderr.write('OpenWind executable file "{:}" not found\n'.format(owExe))
exit()
# set the external optimiser flag to True so that we can use our optimizing routines
acutils.owIniSet(owExe, extVal=True, debug=True)
# Set OpenWind script name
testpath = '../templates/'
#owXMLname = testpath + 'rtecScript.xml' # replace turb, energy capture #KLD - this script does not work for me with this component
owXMLname = testpath + 'owacScript.xml' # optimize operation
#owXMLname = testpath + 'rtopScript.xml' # replace turb, optimize
if modify_turbine:
owXMLname = testpath + 'rtopScript.xml' # replace turb, optimize
if not os.path.isfile(owXMLname):
sys.stderr.write('OpenWind script file "{:}" not found\n'.format(owXMLname))
exit()
dscript = rwScriptXML.rdScript(owXMLname,debug=debug) # Show our operations
workbook = dscript['workbook']
# default turbine positions and size of translation
wt_positions = [[456000.00,4085000.00],
[456500.00,4085000.00]]
deltaX = 3000.0
deltaY = -2000.0
#deltaX = 200.0
#deltaY = -200.0
deltaX = 3.000
deltaY = -2.000
# Read turbine positions from workbook
if debug:
sys.stderr.write('Getting turbine positions from {:}\n'.format(workbook))
wb = acutils.WTWkbkFile(wkbk=workbook, owexe=owExe)
wt_positions = wb.xy
if debug:
sys.stderr.write('Got {:} turbine positions\n'.format(len(wt_positions)))
# Initialize OWACcomp component
ow = OWACcomp(owExe=owExe, debug=debug, scriptFile=owXMLname, start_once=start_once, opt_log=opt_log) #, stopOW=False)
if not ow.scriptOK:
sys.stderr.write("\n*** ERROR found in script file\n\n")
exit()
# starting point for turbine mods
#wt_list_elem = dummy_wt_list()
base_turbine_file = testpath + 'NREL5MW.owtg'
wt_list_elem = turbfuncs.owtg_to_wtpc(base_turbine_file)
ow.wt_layout.wt_list = [ wt_list_elem for i in range(len(wt_positions)) ]
if debug:
sys.stderr.write('Initialized {:} turbines in wt_layout\n'.format(len(wt_positions)))
# With each iteration
# move turbines farther offshore
# possibly modify the turbine rotor diam and power curve and replace turbine
if debug:
ofh = open('wtp.txt', 'w')
for irun in range(1,4):
for i in range(len(wt_positions)):
wt_positions[i][0] += deltaX
wt_positions[i][1] += deltaY
if debug:
ofh.write('{:2d} {:3d} {:.1f} {:.1f}\n'.format(irun, i, wt_positions[i][0], wt_positions[i][1]))
ow.wt_layout.wt_positions = wt_positions
# modify the turbine
ow.rotor_diameter += 1.0
if ow.replace_turbine:
wt_list_elem = ow.wt_layout.wt_list[0]
wt_list_elem.power_rating *= 1.05
for i in range(len(wt_list_elem.power_curve)):
wt_list_elem.power_curve[i][1] *= 1.05
ow.wt_layout.wt_list = [wt_list_elem for i in range(len(ow.wt_layout.wt_list)) ]
if debug:
ofh.write('Updated {:} turbines with:\n'.format(len(ow.wt_layout.wt_list)))
ofh.write(turbfuncs.wtpc_dump(ow.wt_layout.wt_list[0]))
ow.execute() # run the openWind process
print '\nFinal values'
owd = ow.dump()
print ' {:}'.format(owd)
print '-' * 40, '\n'
if start_once:
ow.terminateOW()
if __name__ == "__main__":
# Substitute your own path to Openwind Enterprise
#owExe = 'C:/Models/Openwind/openWind64_ac.exe'
owExe = 'D:/rassess/Openwind/openWind64_ac.exe' # Old Academic v.1275
owExe = 'D:/rassess/Openwind/openWind64.exe'
example(owExe)
|
issue-73050.rs
|
// check-pass
// edition:2018
#[allow(unused)]
async fn foo<'a>() {
let _data = &mut [0u8; { 1 + 4 }];
bar().await
}
async fn bar() {}
fn main() {}
|
1572350407521-email.ts
|
import {MigrationInterface, QueryRunner} from "typeorm";
export class email1572350407521 implements MigrationInterface {
name = 'email1572350407521'
public async up(queryRunner: QueryRunner): Promise<any> {
await queryRunner.query("ALTER TABLE `user` ADD `email` varchar(255) NOT NULL", undefined);
await queryRunner.query("ALTER TABLE `user` ADD UNIQUE INDEX `IDX_e12875dfb3b1d92d7d7c5377e2` (`email`)", undefined);
}
public async down(queryRunner: QueryRunner): Promise<any> {
await queryRunner.query("ALTER TABLE `user` DROP INDEX `IDX_e12875dfb3b1d92d7d7c5377e2`", undefined);
await queryRunner.query("ALTER TABLE `user` DROP COLUMN `email`", undefined);
}
}
|
setup.py
|
import setuptools
import pkg_resources
from setuptools import setup, Extension
def is_installed(requirement):
    try:
        pkg_resources.require(requirement)
    except pkg_resources.ResolutionError:
        return False
    else:
        return True
if not is_installed('numpy>=1.11.0'):
print("""
Error: numpy needs to be installed first. You can install it via:
$ pip install numpy
""")
exit(1)
if not is_installed('Cython>=0.29'):
print("""
Error: cython needs to be installed first. You can install it via:
$ pip install cython
""")
exit(1)
import numpy
from Cython.Distutils import build_ext
from Cython.Build import cythonize
with open("README.md", "r") as fh:
long_description = fh.read()
ida_dir = "ampere/models/ida"
ida_files = ['ida.c', 'ida_band.c', 'ida_dense.c', 'ida_direct.c', 'ida_ic.c', 'ida_io.c', 'nvector_serial.c', 'sundials_band.c', 'sundials_dense.c', 'sundials_direct.c', 'sundials_math.c', 'sundials_nvector.c']
ida_requirements1 = [ida_dir + '/' + ida_file for ida_file in ida_files]
ext_modules = [
Extension("ampere.models.P2D.P2D_fd", ["ampere/models/P2D/P2D_fd.pyx", "ampere/models/P2D/P2D_fd.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_fd", ["ampere/models/SPM/SPM_fd.pyx", "ampere/models/SPM/SPM_fd.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_fd_sei", ["ampere/models/SPM/SPM_fd_sei.pyx", "ampere/models/SPM/SPM_fd_sei.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_par", ["ampere/models/SPM/SPM_par.pyx", "ampere/models/SPM/SPM_par.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
]
cmdclass = {'build_ext': build_ext}
print(setuptools.find_packages())
setup(
name="ampere",
version="0.5.4",
author="Neal Dawson-Elli",
author_email="[email protected]",
description="A Python package for working with battery discharge data and physics-based battery models",
cmdclass=cmdclass,
ext_modules=cythonize(ext_modules, compiler_directives={'language_level' : "3"}),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nealde/Ampere",
packages=[*setuptools.find_packages()],
install_requires=['cython', 'matplotlib < 3.4', 'numpy', 'scipy'],
classifiers=[
"Programming Language :: Python :: 3",
'Programming Language :: Cython',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Topic :: Scientific/Engineering :: Mathematics',
],
keywords="battery numerical simulation modeling",
)
|
_configuration.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
VERSION = "unknown"
class AzureMonitorClientConfiguration(Configuration):
"""Configuration for AzureMonitorClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param host: Breeze endpoint: https://dc.services.visualstudio.com.
:type host: str
"""
def __init__(
self,
host: str = "https://dc.services.visualstudio.com",
**kwargs: Any
) -> None:
if host is None:
raise ValueError("Parameter 'host' must not be None.")
super(AzureMonitorClientConfiguration, self).__init__(**kwargs)
self.host = host
kwargs.setdefault('sdk_moniker', 'azuremonitorclient/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
        # this is the synchronous client configuration, so the sync retry and
        # redirect policies apply (the Async* variants belong to the aio client)
        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
|
main.rs
|
#![allow(clippy::integer_arithmetic)]
use clap::{
crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, AppSettings,
Arg, ArgMatches, SubCommand,
};
use itertools::Itertools;
use log::*;
use regex::Regex;
use serde::Serialize;
use serde_json::json;
use solana_clap_utils::{
input_parsers::{cluster_type_of, pubkey_of, pubkeys_of},
input_validators::{
is_parsable, is_pubkey, is_pubkey_or_keypair, is_slot, is_valid_percentage,
},
};
use solana_core::cost_model::CostModel;
use solana_core::cost_tracker::CostTracker;
use solana_entry::entry::Entry;
use solana_ledger::{
ancestor_iterator::AncestorIterator,
bank_forks_utils,
blockstore::{create_new_ledger, Blockstore, PurgeType},
blockstore_db::{self, AccessType, BlockstoreRecoveryMode, Column, Database},
blockstore_processor::ProcessOptions,
shred::Shred,
};
use solana_runtime::{
bank::{Bank, RewardCalculationEvent},
bank_forks::BankForks,
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
snapshot_config::SnapshotConfig,
snapshot_utils::{self, ArchiveFormat, SnapshotVersion, DEFAULT_MAX_SNAPSHOTS_TO_RETAIN},
};
use solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount},
clock::{Epoch, Slot},
feature::{self, Feature},
feature_set,
genesis_config::{ClusterType, GenesisConfig},
hash::Hash,
inflation::Inflation,
native_token::{lamports_to_sol, sol_to_lamports, Sol},
pubkey::Pubkey,
rent::Rent,
sanitized_transaction::SanitizedTransaction,
shred_version::compute_shred_version,
stake::{self, state::StakeState},
system_program,
};
use solana_stake_program::stake_state::{self, PointValue};
use solana_vote_program::{
self,
vote_state::{self, VoteState},
};
use std::{
borrow::Cow,
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
ffi::OsStr,
fs::{self, File},
io::{self, stdout, BufRead, BufReader, Write},
path::{Path, PathBuf},
process::{exit, Command, Stdio},
str::FromStr,
sync::{Arc, RwLock},
};
mod bigtable;
use bigtable::*;
#[derive(PartialEq)]
enum LedgerOutputMethod {
Print,
Json,
}
fn output_slot_rewards(blockstore: &Blockstore, slot: Slot, method: &LedgerOutputMethod) {
// Note: rewards are not output in JSON yet
if *method == LedgerOutputMethod::Print {
if let Ok(Some(rewards)) = blockstore.read_rewards(slot) {
if !rewards.is_empty() {
println!(" Rewards:");
println!(
" {:<44} {:^15} {:<15} {:<20} {:>10}",
"Address", "Type", "Amount", "New Balance", "Commission",
);
for reward in rewards {
let sign = if reward.lamports < 0 { "-" } else { "" };
println!(
" {:<44} {:^15} {:<15} {} {}",
reward.pubkey,
if let Some(reward_type) = reward.reward_type {
format!("{}", reward_type)
} else {
"-".to_string()
},
format!(
"{}◎{:<14.9}",
sign,
lamports_to_sol(reward.lamports.abs() as u64)
),
format!("◎{:<18.9}", lamports_to_sol(reward.post_balance)),
reward
.commission
.map(|commission| format!("{:>9}%", commission))
.unwrap_or_else(|| " -".to_string())
);
}
}
}
}
}
fn output_entry(
blockstore: &Blockstore,
method: &LedgerOutputMethod,
slot: Slot,
entry_index: usize,
entry: &Entry,
) {
match method {
LedgerOutputMethod::Print => {
println!(
" Entry {} - num_hashes: {}, hashes: {}, transactions: {}",
entry_index,
entry.num_hashes,
entry.hash,
entry.transactions.len()
);
for (transactions_index, transaction) in entry.transactions.iter().enumerate() {
println!(" Transaction {}", transactions_index);
let transaction_status = blockstore
.read_transaction_status((transaction.signatures[0], slot))
.unwrap_or_else(|err| {
eprintln!(
"Failed to read transaction status for {} at slot {}: {}",
transaction.signatures[0], slot, err
);
None
})
.map(|transaction_status| transaction_status.into());
solana_cli_output::display::println_transaction(
transaction,
&transaction_status,
" ",
None,
None,
);
}
}
LedgerOutputMethod::Json => {
// Note: transaction status is not output in JSON yet
serde_json::to_writer(stdout(), &entry).expect("serialize entry");
stdout().write_all(b",\n").expect("newline");
}
}
}
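/// Prints one slot's metadata and entries. `verbose_level >= 1` adds
/// transaction/hash/program-invocation summaries; `>= 2` dumps every entry
/// and the slot's rewards.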
fn output_slot(
blockstore: &Blockstore,
slot: Slot,
allow_dead_slots: bool,
method: &LedgerOutputMethod,
verbose_level: u64,
) -> Result<(), String> {
if blockstore.is_dead(slot) {
if allow_dead_slots {
if *method == LedgerOutputMethod::Print {
println!(" Slot is dead");
}
} else {
return Err("Dead slot".to_string());
}
}
let (entries, num_shreds, _is_full) = blockstore
.get_slot_entries_with_shred_info(slot, 0, allow_dead_slots)
.map_err(|err| format!("Failed to load entries for slot {}: {:?}", slot, err))?;
if *method == LedgerOutputMethod::Print {
if let Ok(Some(meta)) = blockstore.meta(slot) {
if verbose_level >= 2 {
println!(" Slot Meta {:?}", meta);
} else {
println!(
" num_shreds: {} parent_slot: {} num_entries: {}",
num_shreds,
meta.parent_slot,
entries.len()
);
}
}
}
if verbose_level >= 2 {
for (entry_index, entry) in entries.iter().enumerate() {
output_entry(blockstore, method, slot, entry_index, entry);
}
output_slot_rewards(blockstore, slot, method);
} else if verbose_level >= 1 {
let mut transactions = 0;
let mut hashes = 0;
let mut program_ids = HashMap::new();
for entry in &entries {
transactions += entry.transactions.len();
hashes += entry.num_hashes;
for transaction in &entry.transactions {
for instruction in &transaction.message().instructions {
let program_id =
transaction.message().account_keys[instruction.program_id_index as usize];
*program_ids.entry(program_id).or_insert(0) += 1;
}
}
}
let hash = if let Some(entry) = entries.last() {
entry.hash
} else {
Hash::default()
};
println!(
" Transactions: {} hashes: {} block_hash: {}",
transactions, hashes, hash,
);
println!(" Programs: {:?}", program_ids);
}
Ok(())
}
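/// Iterates slots from `starting_slot` to `ending_slot` (stopping early once
/// `num_slots` slots have been printed, if given) and renders each one via
/// `output_slot`. Backs both the `print` and `json` subcommands.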
fn output_ledger(
blockstore: Blockstore,
starting_slot: Slot,
ending_slot: Slot,
allow_dead_slots: bool,
method: LedgerOutputMethod,
num_slots: Option<Slot>,
verbose_level: u64,
only_rooted: bool,
) {
let slot_iterator = blockstore
.slot_meta_iterator(starting_slot)
.unwrap_or_else(|err| {
eprintln!(
"Failed to load entries starting from slot {}: {:?}",
starting_slot, err
);
exit(1);
});
if method == LedgerOutputMethod::Json {
stdout().write_all(b"{\"ledger\":[\n").expect("open array");
}
let num_slots = num_slots.unwrap_or(Slot::MAX);
let mut num_printed = 0;
for (slot, slot_meta) in slot_iterator {
if only_rooted && !blockstore.is_root(slot) {
continue;
}
if slot > ending_slot {
break;
}
match method {
LedgerOutputMethod::Print => {
println!("Slot {} root?: {}", slot, blockstore.is_root(slot))
}
LedgerOutputMethod::Json => {
serde_json::to_writer(stdout(), &slot_meta).expect("serialize slot_meta");
stdout().write_all(b",\n").expect("newline");
}
}
if let Err(err) = output_slot(&blockstore, slot, allow_dead_slots, &method, verbose_level) {
eprintln!("{}", err);
}
num_printed += 1;
if num_printed >= num_slots as usize {
break;
}
}
if method == LedgerOutputMethod::Json {
stdout().write_all(b"\n]}\n").expect("close array");
}
}
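/// Renders a DOT graph by piping it to the Graphviz `dot` binary, which must
/// be installed and on PATH; e.g. `render_dot(dot, "forks.pdf", "pdf")` runs
/// `dot -Tpdf -oforks.pdf` (the file name here is illustrative).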
fn render_dot(dot: String, output_file: &str, output_format: &str) -> io::Result<()> {
let mut child = Command::new("dot")
.arg(format!("-T{}", output_format))
.arg(format!("-o{}", output_file))
.stdin(Stdio::piped())
.spawn()
.map_err(|err| {
eprintln!("Failed to spawn dot: {:?}", err);
err
})?;
let stdin = child.stdin.as_mut().unwrap();
stdin.write_all(&dot.into_bytes())?;
let status = child.wait_with_output()?.status;
if !status.success() {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("dot failed with error {}", status.code().unwrap_or(-1)),
));
}
Ok(())
}
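/// Builds a Graphviz DOT description of the fork structure in `bank_forks`:
/// one node per bank linked to its parent, annotated with each validator's
/// latest vote and stake, and optionally every vote observed.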
#[allow(clippy::cognitive_complexity)]
fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String {
let frozen_banks = bank_forks.frozen_banks();
let mut fork_slots: HashSet<_> = frozen_banks.keys().cloned().collect();
for (_, bank) in frozen_banks {
for parent in bank.parents() {
fork_slots.remove(&parent.slot());
}
}
// Search all forks and collect the last vote made by each validator
let mut last_votes = HashMap::new();
let default_vote_state = VoteState::default();
for fork_slot in &fork_slots {
let bank = &bank_forks[*fork_slot];
let total_stake = bank
.vote_accounts()
.iter()
.map(|(_, (stake, _))| stake)
.sum();
for (_, (stake, vote_account)) in bank.vote_accounts() {
let vote_state = vote_account.vote_state();
let vote_state = vote_state.as_ref().unwrap_or(&default_vote_state);
if let Some(last_vote) = vote_state.votes.iter().last() {
let entry = last_votes.entry(vote_state.node_pubkey).or_insert((
last_vote.slot,
vote_state.clone(),
stake,
total_stake,
));
if entry.0 < last_vote.slot {
*entry = (last_vote.slot, vote_state.clone(), stake, total_stake);
}
}
}
}
// Figure the stake distribution at all the nodes containing the last vote from each
// validator
let mut slot_stake_and_vote_count = HashMap::new();
for (last_vote_slot, _, stake, total_stake) in last_votes.values() {
let entry = slot_stake_and_vote_count
.entry(last_vote_slot)
.or_insert((0, 0, *total_stake));
entry.0 += 1;
entry.1 += stake;
assert_eq!(entry.2, *total_stake)
}
let mut dot = vec!["digraph {".to_string()];
// Build a subgraph consisting of all banks and links to their parent banks
dot.push(" subgraph cluster_banks {".to_string());
dot.push(" style=invis".to_string());
let mut styled_slots = HashSet::new();
let mut all_votes: HashMap<Pubkey, HashMap<Slot, VoteState>> = HashMap::new();
for fork_slot in &fork_slots {
let mut bank = bank_forks[*fork_slot].clone();
let mut first = true;
loop {
for (_, (_, vote_account)) in bank.vote_accounts() {
let vote_state = vote_account.vote_state();
let vote_state = vote_state.as_ref().unwrap_or(&default_vote_state);
if let Some(last_vote) = vote_state.votes.iter().last() {
let validator_votes = all_votes.entry(vote_state.node_pubkey).or_default();
validator_votes
.entry(last_vote.slot)
.or_insert_with(|| vote_state.clone());
}
}
if !styled_slots.contains(&bank.slot()) {
dot.push(format!(
r#" "{}"[label="{} (epoch {})\nleader: {}{}{}",style="{}{}"];"#,
bank.slot(),
bank.slot(),
bank.epoch(),
bank.collector_id(),
if let Some(parent) = bank.parent() {
format!(
"\ntransactions: {}",
bank.transaction_count() - parent.transaction_count(),
)
} else {
"".to_string()
},
if let Some((votes, stake, total_stake)) =
slot_stake_and_vote_count.get(&bank.slot())
{
format!(
"\nvotes: {}, stake: {:.1} SOL ({:.1}%)",
votes,
lamports_to_sol(*stake),
*stake as f64 / *total_stake as f64 * 100.,
)
} else {
"".to_string()
},
if first { "filled," } else { "" },
""
));
styled_slots.insert(bank.slot());
}
first = false;
match bank.parent() {
None => {
if bank.slot() > 0 {
dot.push(format!(r#" "{}" -> "..." [dir=back]"#, bank.slot(),));
}
break;
}
Some(parent) => {
let slot_distance = bank.slot() - parent.slot();
let penwidth = if bank.epoch() > parent.epoch() {
"5"
} else {
"1"
};
let link_label = if slot_distance > 1 {
format!("label=\"{} slots\",color=red", slot_distance)
} else {
"color=blue".to_string()
};
dot.push(format!(
r#" "{}" -> "{}"[{},dir=back,penwidth={}];"#,
bank.slot(),
parent.slot(),
link_label,
penwidth
));
bank = parent.clone();
}
}
}
}
dot.push(" }".to_string());
// Strafe the banks with links from validators to the bank they last voted on,
// while collecting information about the absent votes and stakes
let mut absent_stake = 0;
let mut absent_votes = 0;
let mut lowest_last_vote_slot = std::u64::MAX;
let mut lowest_total_stake = 0;
for (node_pubkey, (last_vote_slot, vote_state, stake, total_stake)) in &last_votes {
all_votes.entry(*node_pubkey).and_modify(|validator_votes| {
validator_votes.remove(last_vote_slot);
});
dot.push(format!(
r#" "last vote {}"[shape=box,label="Latest validator vote: {}\nstake: {} SOL\nroot slot: {}\nvote history:\n{}"];"#,
node_pubkey,
node_pubkey,
lamports_to_sol(*stake),
vote_state.root_slot.unwrap_or(0),
vote_state
.votes
.iter()
.map(|vote| format!("slot {} (conf={})", vote.slot, vote.confirmation_count))
.collect::<Vec<_>>()
.join("\n")
));
dot.push(format!(
r#" "last vote {}" -> "{}" [style=dashed,label="latest vote"];"#,
node_pubkey,
if styled_slots.contains(last_vote_slot) {
last_vote_slot.to_string()
} else {
if *last_vote_slot < lowest_last_vote_slot {
lowest_last_vote_slot = *last_vote_slot;
lowest_total_stake = *total_stake;
}
absent_votes += 1;
absent_stake += stake;
"...".to_string()
},
));
}
// Annotate the final "..." node with absent vote and stake information
if absent_votes > 0 {
dot.push(format!(
r#" "..."[label="...\nvotes: {}, stake: {:.1} SOL {:.1}%"];"#,
absent_votes,
lamports_to_sol(absent_stake),
absent_stake as f64 / lowest_total_stake as f64 * 100.,
));
}
    // Add vote information from all banks.
if include_all_votes {
for (node_pubkey, validator_votes) in &all_votes {
for (vote_slot, vote_state) in validator_votes {
dot.push(format!(
r#" "{} vote {}"[shape=box,style=dotted,label="validator vote: {}\nroot slot: {}\nvote history:\n{}"];"#,
node_pubkey,
vote_slot,
node_pubkey,
vote_state.root_slot.unwrap_or(0),
vote_state
.votes
.iter()
.map(|vote| format!("slot {} (conf={})", vote.slot, vote.confirmation_count))
.collect::<Vec<_>>()
.join("\n")
));
dot.push(format!(
r#" "{} vote {}" -> "{}" [style=dotted,label="vote"];"#,
node_pubkey,
vote_slot,
if styled_slots.contains(vote_slot) {
vote_slot.to_string()
} else {
"...".to_string()
},
));
}
}
}
dot.push("}".to_string());
dot.join("\n")
}
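/// Scans every row of one blockstore column, accumulating key/value/row size
/// histograms, and prints the statistics as pretty-printed JSON.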
fn analyze_column<
T: solana_ledger::blockstore_db::Column + solana_ledger::blockstore_db::ColumnName,
>(
db: &Database,
name: &str,
key_size: usize,
) {
let mut key_tot: u64 = 0;
let mut val_hist = histogram::Histogram::new();
let mut val_tot: u64 = 0;
let mut row_hist = histogram::Histogram::new();
let a = key_size as u64;
for (_x, y) in db.iter::<T>(blockstore_db::IteratorMode::Start).unwrap() {
let b = y.len() as u64;
key_tot += a;
val_hist.increment(b).unwrap();
val_tot += b;
row_hist.increment(a + b).unwrap();
}
let json_result = if val_hist.entries() > 0 {
json!({
"column":name,
"entries":val_hist.entries(),
"key_stats":{
"max":a,
"total_bytes":key_tot,
},
"val_stats":{
"p50":val_hist.percentile(50.0).unwrap(),
"p90":val_hist.percentile(90.0).unwrap(),
"p99":val_hist.percentile(99.0).unwrap(),
"p999":val_hist.percentile(99.9).unwrap(),
"min":val_hist.minimum().unwrap(),
"max":val_hist.maximum().unwrap(),
"stddev":val_hist.stddev().unwrap(),
"total_bytes":val_tot,
},
"row_stats":{
"p50":row_hist.percentile(50.0).unwrap(),
"p90":row_hist.percentile(90.0).unwrap(),
"p99":row_hist.percentile(99.0).unwrap(),
"p999":row_hist.percentile(99.9).unwrap(),
"min":row_hist.minimum().unwrap(),
"max":row_hist.maximum().unwrap(),
"stddev":row_hist.stddev().unwrap(),
"total_bytes":key_tot + val_tot,
},
})
} else {
json!({
"column":name,
"entries":val_hist.entries(),
"key_stats":{
"max":a,
"total_bytes":0,
},
"val_stats":{
"total_bytes":0,
},
"row_stats":{
"total_bytes":0,
},
})
};
println!("{}", serde_json::to_string_pretty(&json_result).unwrap());
}
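/// Prints storage statistics for each column family in the ledger's rocksdb;
/// backs the `analyze-storage` subcommand.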
fn analyze_storage(database: &Database) {
use blockstore_db::columns::*;
analyze_column::<SlotMeta>(database, "SlotMeta", SlotMeta::key_size());
analyze_column::<Orphans>(database, "Orphans", Orphans::key_size());
analyze_column::<DeadSlots>(database, "DeadSlots", DeadSlots::key_size());
analyze_column::<ErasureMeta>(database, "ErasureMeta", ErasureMeta::key_size());
analyze_column::<Root>(database, "Root", Root::key_size());
analyze_column::<Index>(database, "Index", Index::key_size());
analyze_column::<ShredData>(database, "ShredData", ShredData::key_size());
analyze_column::<ShredCode>(database, "ShredCode", ShredCode::key_size());
analyze_column::<TransactionStatus>(
database,
"TransactionStatus",
TransactionStatus::key_size(),
);
    analyze_column::<TransactionStatusIndex>(
database,
"TransactionStatusIndex",
TransactionStatusIndex::key_size(),
);
analyze_column::<AddressSignatures>(
database,
"AddressSignatures",
AddressSignatures::key_size(),
);
analyze_column::<Rewards>(database, "Rewards", Rewards::key_size());
}
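/// Opens the blockstore at `ledger_path` with the requested access type,
/// exiting the process with an error message on failure.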
fn open_blockstore(
ledger_path: &Path,
access_type: AccessType,
wal_recovery_mode: Option<BlockstoreRecoveryMode>,
) -> Blockstore {
match Blockstore::open_with_access_type(ledger_path, access_type, wal_recovery_mode, true) {
Ok(blockstore) => blockstore,
Err(err) => {
eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err);
exit(1);
}
}
}
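/// Opens the raw rocksdb `Database` under `ledger_path`, exiting the process
/// on failure.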
fn open_database(ledger_path: &Path, access_type: AccessType) -> Database {
match Database::open(&ledger_path.join("rocksdb"), access_type, None) {
Ok(database) => database,
Err(err) => {
eprintln!("Unable to read the Ledger rocksdb: {:?}", err);
exit(1);
}
}
}
// This function is duplicated in validator/src/main.rs...
fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
if matches.is_present(name) {
Some(values_t_or_exit!(matches, name, Slot))
} else {
None
}
}
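/// Loads bank forks from the ledger, optionally resuming from a snapshot.
/// Under secondary blockstore access, the snapshot and accounts directories
/// are redirected to `.ledger-tool`-suffixed paths so the files of a
/// concurrently running validator are never touched.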
fn load_bank_forks(
arg_matches: &ArgMatches,
genesis_config: &GenesisConfig,
blockstore: &Blockstore,
process_options: ProcessOptions,
snapshot_archive_path: Option<PathBuf>,
) -> bank_forks_utils::LoadResult {
let snapshot_path = blockstore
.ledger_path()
.join(if blockstore.is_primary_access() {
"snapshot"
} else {
"snapshot.ledger-tool"
});
let snapshot_config = if arg_matches.is_present("no_snapshot") {
None
} else {
let snapshot_package_output_path =
snapshot_archive_path.unwrap_or_else(|| blockstore.ledger_path().to_path_buf());
Some(SnapshotConfig {
snapshot_interval_slots: 0, // Value doesn't matter
snapshot_package_output_path,
snapshot_path,
archive_format: ArchiveFormat::TarBzip2,
snapshot_version: SnapshotVersion::default(),
maximum_snapshots_to_retain: DEFAULT_MAX_SNAPSHOTS_TO_RETAIN,
})
};
let account_paths = if let Some(account_paths) = arg_matches.value_of("account_paths") {
if !blockstore.is_primary_access() {
            // Be defensive: even when the default accounts dir is explicitly specified, it
            // may be shared with a running validator, and using it here could wipe it!
eprintln!("Error: custom accounts path is not supported under secondary access");
exit(1);
}
account_paths.split(',').map(PathBuf::from).collect()
} else if blockstore.is_primary_access() {
vec![blockstore.ledger_path().join("accounts")]
} else {
let non_primary_accounts_path = blockstore.ledger_path().join("accounts.ledger-tool");
warn!(
"Default accounts path is switched aligning with Blockstore's secondary access: {:?}",
non_primary_accounts_path
);
vec![non_primary_accounts_path]
};
bank_forks_utils::load(
genesis_config,
blockstore,
account_paths,
None,
snapshot_config.as_ref(),
process_options,
None,
None,
)
}
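/// Feeds every transaction in `slot` through the cost model and cost
/// tracker, printing each transaction the tracker rejects along with
/// aggregate entry, transaction, and program counts for the block.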
fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> {
if blockstore.is_dead(slot) {
return Err("Dead slot".to_string());
}
let (entries, _num_shreds, _is_full) = blockstore
.get_slot_entries_with_shred_info(slot, 0, false)
.map_err(|err| format!(" Slot: {}, Failed to load entries, err {:?}", slot, err))?;
let mut transactions = 0;
let mut programs = 0;
let mut program_ids = HashMap::new();
let mut cost_model = CostModel::default();
cost_model.initialize_cost_table(&blockstore.read_program_costs().unwrap());
let cost_model = Arc::new(RwLock::new(cost_model));
let mut cost_tracker = CostTracker::new(cost_model.clone());
for entry in &entries {
transactions += entry.transactions.len();
let mut cost_model = cost_model.write().unwrap();
for transaction in &entry.transactions {
programs += transaction.message().instructions.len();
let transaction =
match SanitizedTransaction::try_create(Cow::Borrowed(transaction), Hash::default())
{
Ok(tx) => tx,
Err(err) => {
warn!(
"failed to sanitize transaction, err {:?}, tx {:?}",
err, transaction
);
continue;
}
};
let tx_cost = cost_model.calculate_cost(&transaction);
if cost_tracker.try_add(tx_cost).is_err() {
println!(
"Slot: {}, CostModel rejected transaction {:?}, stats {:?}!",
slot,
transaction,
cost_tracker.get_stats()
);
}
for instruction in &transaction.message().instructions {
let program_id =
transaction.message().account_keys[instruction.program_id_index as usize];
*program_ids.entry(program_id).or_insert(0) += 1;
}
}
}
println!(
"Slot: {}, Entries: {}, Transactions: {}, Programs {}, {:?}",
slot,
entries.len(),
transactions,
programs,
cost_tracker.get_stats()
);
println!(" Programs: {:?}", program_ids);
Ok(())
}
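/// Opens the ledger's genesis config, honoring the
/// --max-genesis-archive-unpacked-size argument.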
fn open_genesis_config_by(ledger_path: &Path, matches: &ArgMatches<'_>) -> GenesisConfig {
let max_genesis_archive_unpacked_size =
value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64);
open_genesis_config(ledger_path, max_genesis_archive_unpacked_size)
}
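/// Panics unless the bank's recorded capitalization matches a full
/// recalculation.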
fn assert_capitalization(bank: &Bank) {
let debug_verify = true;
assert!(bank.calculate_and_verify_capitalization(debug_verify));
}
#[allow(clippy::cognitive_complexity)]
fn main() {
// Ignore SIGUSR1 to prevent long-running calls being killed by logrotate
// in warehouse deployments
#[cfg(unix)]
{
// `register()` is unsafe because the action is called in a signal handler
// with the usual caveats. So long as this action body stays empty, we'll
// be fine
unsafe { signal_hook::register(signal_hook::SIGUSR1, || {}) }.unwrap();
}
const DEFAULT_ROOT_COUNT: &str = "1";
const DEFAULT_MAX_SLOTS_ROOT_REPAIR: &str = "2000";
solana_logger::setup_with_default("solana=info");
let starting_slot_arg = Arg::with_name("starting_slot")
.long("starting-slot")
.value_name("NUM")
.takes_value(true)
.default_value("0")
.help("Start at this slot");
let ending_slot_arg = Arg::with_name("ending_slot")
.long("ending-slot")
.value_name("SLOT")
.takes_value(true)
.help("The last slot to iterate to");
let no_snapshot_arg = Arg::with_name("no_snapshot")
.long("no-snapshot")
.takes_value(false)
.help("Do not start from a local snapshot if present");
let no_bpf_jit_arg = Arg::with_name("no_bpf_jit")
.long("no-bpf-jit")
.takes_value(false)
.help("Disable the just-in-time compiler and instead use the interpreter for BP");
let no_accounts_db_caching_arg = Arg::with_name("no_accounts_db_caching")
.long("no-accounts-db-caching")
.takes_value(false)
.help("Disables accounts-db caching");
let account_paths_arg = Arg::with_name("account_paths")
.long("accounts")
.value_name("PATHS")
.takes_value(true)
.help("Comma separated persistent accounts location");
let accounts_db_test_hash_calculation_arg = Arg::with_name("accounts_db_test_hash_calculation")
.long("accounts-db-test-hash-calculation")
.help("Enable hash calculation test");
let halt_at_slot_arg = Arg::with_name("halt_at_slot")
.long("halt-at-slot")
.value_name("SLOT")
.validator(is_slot)
.takes_value(true)
.help("Halt processing at the given slot");
let verify_index_arg = Arg::with_name("verify_accounts_index")
.long("verify-accounts-index")
.takes_value(false)
.help("For debugging and tests on accounts index.");
let limit_load_slot_count_from_snapshot_arg = Arg::with_name("limit_load_slot_count_from_snapshot")
.long("limit-load-slot-count-from-snapshot")
.value_name("SLOT")
.validator(is_slot)
.takes_value(true)
.help("For debugging and profiling with large snapshots, artificially limit how many slots are loaded from a snapshot.");
let hard_forks_arg = Arg::with_name("hard_forks")
.long("hard-fork")
.value_name("SLOT")
.validator(is_slot)
.multiple(true)
.takes_value(true)
.help("Add a hard fork at this slot");
let allow_dead_slots_arg = Arg::with_name("allow_dead_slots")
.long("allow-dead-slots")
.takes_value(false)
.help("Output dead slots as well");
let default_genesis_archive_unpacked_size = MAX_GENESIS_ARCHIVE_UNPACKED_SIZE.to_string();
let max_genesis_archive_unpacked_size_arg = Arg::with_name("max_genesis_archive_unpacked_size")
.long("max-genesis-archive-unpacked-size")
.value_name("NUMBER")
.takes_value(true)
.default_value(&default_genesis_archive_unpacked_size)
.help("maximum total uncompressed size of unpacked genesis archive");
let hashes_per_tick = Arg::with_name("hashes_per_tick")
.long("hashes-per-tick")
.value_name("NUM_HASHES|\"sleep\"")
.takes_value(true)
.help(
"How many PoH hashes to roll before emitting the next tick. \
If \"sleep\", for development \
sleep for the target tick duration instead of hashing",
);
let snapshot_version_arg = Arg::with_name("snapshot_version")
.long("snapshot-version")
.value_name("SNAPSHOT_VERSION")
.validator(is_parsable::<SnapshotVersion>)
.takes_value(true)
.default_value(SnapshotVersion::default().into())
.help("Output snapshot version");
let default_max_snapshot_to_retain = &DEFAULT_MAX_SNAPSHOTS_TO_RETAIN.to_string();
let maximum_snapshots_to_retain_arg = Arg::with_name("maximum_snapshots_to_retain")
.long("maximum-snapshots-to-retain")
.value_name("NUMBER")
.takes_value(true)
.default_value(default_max_snapshot_to_retain)
.help("Maximum number of snapshots to hold on to during snapshot purge");
let rent = Rent::default();
let default_bootstrap_validator_lamports = &sol_to_lamports(500.0)
.max(VoteState::get_rent_exempt_reserve(&rent))
.to_string();
let default_bootstrap_validator_stake_lamports = &sol_to_lamports(0.5)
.max(StakeState::get_rent_exempt_reserve(&rent))
.to_string();
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.setting(AppSettings::InferSubcommands)
.setting(AppSettings::SubcommandRequiredElseHelp)
.setting(AppSettings::VersionlessSubcommands)
.arg(
Arg::with_name("ledger_path")
.short("l")
.long("ledger")
.value_name("DIR")
.takes_value(true)
.global(true)
.default_value("ledger")
.help("Use DIR as ledger location"),
)
.arg(
Arg::with_name("wal_recovery_mode")
.long("wal-recovery-mode")
.value_name("MODE")
.takes_value(true)
.global(true)
.possible_values(&[
"tolerate_corrupted_tail_records",
"absolute_consistency",
"point_in_time",
"skip_any_corrupted_record"])
.help(
"Mode to recovery the ledger db write ahead log"
),
)
.arg(
Arg::with_name("snapshot_archive_path")
.long("snapshot-archive-path")
.value_name("DIR")
.takes_value(true)
.global(true)
.help("Use DIR for ledger location"),
)
.arg(
Arg::with_name("output_format")
.long("output")
.value_name("FORMAT")
.global(true)
.takes_value(true)
.possible_values(&["json", "json-compact"])
.help("Return information in specified output format, \
currently only available for bigtable subcommands"),
)
.arg(
Arg::with_name("verbose")
.short("v")
.long("verbose")
.global(true)
.multiple(true)
.takes_value(false)
.help("Show additional information where supported"),
)
.bigtable_subcommand()
.subcommand(
SubCommand::with_name("print")
.about("Print the ledger")
.arg(&starting_slot_arg)
.arg(&allow_dead_slots_arg)
.arg(&ending_slot_arg)
.arg(
Arg::with_name("num_slots")
.long("num-slots")
.value_name("SLOT")
.validator(is_slot)
.takes_value(true)
.help("Number of slots to print"),
)
.arg(
Arg::with_name("only_rooted")
.long("only-rooted")
.takes_value(false)
.help("Only print root slots"),
)
)
.subcommand(
SubCommand::with_name("copy")
.about("Copy the ledger")
.arg(&starting_slot_arg)
.arg(&ending_slot_arg)
.arg(
Arg::with_name("target_db")
.long("target-db")
.value_name("PATH")
.takes_value(true)
.help("Target db"),
)
)
.subcommand(
SubCommand::with_name("slot")
.about("Print the contents of one or more slots")
.arg(
Arg::with_name("slots")
.index(1)
.value_name("SLOTS")
.validator(is_slot)
.takes_value(true)
.multiple(true)
.required(true)
.help("Slots to print"),
)
.arg(&allow_dead_slots_arg)
)
.subcommand(
SubCommand::with_name("dead-slots")
.arg(&starting_slot_arg)
.about("Print all the dead slots in the ledger")
)
.subcommand(
SubCommand::with_name("duplicate-slots")
.arg(&starting_slot_arg)
.about("Print all the duplicate slots in the ledger")
)
.subcommand(
SubCommand::with_name("set-dead-slot")
.about("Mark one or more slots dead")
.arg(
Arg::with_name("slots")
.index(1)
.value_name("SLOTS")
.validator(is_slot)
.takes_value(true)
.multiple(true)
.required(true)
.help("Slots to mark dead"),
)
)
.subcommand(
SubCommand::with_name("genesis")
.about("Prints the ledger's genesis config")
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("parse_full_frozen")
.about("Parses log for information about critical events about \
ancestors of the given `ending_slot`")
.arg(&starting_slot_arg)
.arg(&ending_slot_arg)
.arg(
Arg::with_name("log_path")
.long("log-path")
.value_name("PATH")
.takes_value(true)
.help("path to log file to parse"),
)
)
.subcommand(
SubCommand::with_name("genesis-hash")
.about("Prints the ledger's genesis hash")
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("modify-genesis")
.about("Modifies genesis parameters")
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(&hashes_per_tick)
.arg(
Arg::with_name("cluster_type")
.long("cluster-type")
.possible_values(&ClusterType::STRINGS)
.takes_value(true)
.help(
"Selects the features that will be enabled for the cluster"
),
)
.arg(
Arg::with_name("output_directory")
.index(1)
.value_name("DIR")
.takes_value(true)
.help("Output directory for the modified genesis config"),
)
)
.subcommand(
SubCommand::with_name("shred-version")
.about("Prints the ledger's shred hash")
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("shred-meta")
.about("Prints raw shred metadata")
.arg(&starting_slot_arg)
.arg(&ending_slot_arg)
)
.subcommand(
SubCommand::with_name("bank-hash")
.about("Prints the hash of the working bank after reading the ledger")
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("bounds")
.about("Print lowest and highest non-empty slots. \
Note that there may be empty slots within the bounds")
.arg(
Arg::with_name("all")
.long("all")
.takes_value(false)
.required(false)
.help("Additionally print all the non-empty slots within the bounds"),
)
).subcommand(
SubCommand::with_name("json")
.about("Print the ledger in JSON format")
.arg(&starting_slot_arg)
.arg(&allow_dead_slots_arg)
)
.subcommand(
SubCommand::with_name("verify")
.about("Verify the ledger")
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&limit_load_slot_count_from_snapshot_arg)
.arg(&verify_index_arg)
.arg(&hard_forks_arg)
.arg(&no_accounts_db_caching_arg)
.arg(&accounts_db_test_hash_calculation_arg)
.arg(&no_bpf_jit_arg)
.arg(&allow_dead_slots_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(
Arg::with_name("skip_poh_verify")
.long("skip-poh-verify")
.takes_value(false)
.help("Skip ledger PoH verification"),
)
.arg(
Arg::with_name("print_accounts_stats")
.long("print-accounts-stats")
.takes_value(false)
.help("After verifying the ledger, print some information about the account stores"),
)
).subcommand(
SubCommand::with_name("graph")
.about("Create a Graphviz rendering of the ledger")
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(
Arg::with_name("include_all_votes")
.long("include-all-votes")
.help("Include all votes in the graph"),
)
.arg(
Arg::with_name("graph_filename")
.index(1)
.value_name("FILENAME")
.takes_value(true)
.help("Output file"),
)
).subcommand(
SubCommand::with_name("create-snapshot")
.about("Create a new ledger snapshot")
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(&snapshot_version_arg)
.arg(&maximum_snapshots_to_retain_arg)
.arg(
Arg::with_name("snapshot_slot")
.index(1)
.value_name("SLOT")
.validator(|value| {
if value.parse::<Slot>().is_ok()
|| value == "ROOT"
{
Ok(())
} else {
Err(format!(
"Unable to parse as a number or the keyword ROOT, provided: {}",
value
))
}
})
.takes_value(true)
.help("Slot at which to create the snapshot; accepts keyword ROOT for the highest root"),
)
.arg(
Arg::with_name("output_directory")
.index(2)
.value_name("DIR")
.takes_value(true)
.help("Output directory for the snapshot [default: --ledger directory]"),
)
.arg(
Arg::with_name("warp_slot")
.required(false)
.long("warp-slot")
.takes_value(true)
.value_name("WARP_SLOT")
.validator(is_slot)
.help("After loading the snapshot slot warp the ledger to WARP_SLOT, \
which could be a slot in a galaxy far far away"),
)
.arg(
Arg::with_name("faucet_lamports")
.short("t")
.long("faucet-lamports")
.value_name("LAMPORTS")
.takes_value(true)
.requires("faucet_pubkey")
.help("Number of lamports to assign to the faucet"),
)
.arg(
Arg::with_name("faucet_pubkey")
.short("m")
.long("faucet-pubkey")
.value_name("PUBKEY")
.takes_value(true)
.validator(is_pubkey_or_keypair)
.requires("faucet_lamports")
.help("Path to file containing the faucet's pubkey"),
)
.arg(
Arg::with_name("bootstrap_validator")
.short("b")
.long("bootstrap-validator")
.value_name("IDENTITY_PUBKEY VOTE_PUBKEY STAKE_PUBKEY")
.takes_value(true)
.validator(is_pubkey_or_keypair)
.number_of_values(3)
.multiple(true)
.help("The bootstrap validator's identity, vote and stake pubkeys"),
)
.arg(
Arg::with_name("bootstrap_stake_authorized_pubkey")
.long("bootstrap-stake-authorized-pubkey")
.value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY")
.takes_value(true)
.validator(is_pubkey_or_keypair)
.help(
"Path to file containing the pubkey authorized to manage the bootstrap \
validator's stake [default: --bootstrap-validator IDENTITY_PUBKEY]",
),
)
.arg(
Arg::with_name("bootstrap_validator_lamports")
.long("bootstrap-validator-lamports")
.value_name("LAMPORTS")
.takes_value(true)
.default_value(default_bootstrap_validator_lamports)
.help("Number of lamports to assign to the bootstrap validator"),
)
.arg(
Arg::with_name("bootstrap_validator_stake_lamports")
.long("bootstrap-validator-stake-lamports")
.value_name("LAMPORTS")
.takes_value(true)
.default_value(default_bootstrap_validator_stake_lamports)
.help("Number of lamports to assign to the bootstrap validator's stake account"),
)
.arg(
Arg::with_name("rent_burn_percentage")
.long("rent-burn-percentage")
.value_name("NUMBER")
.takes_value(true)
.help("Adjust percentage of collected rent to burn")
.validator(is_valid_percentage),
)
.arg(&hashes_per_tick)
.arg(
Arg::with_name("accounts_to_remove")
.required(false)
.long("remove-account")
.takes_value(true)
.value_name("PUBKEY")
.validator(is_pubkey)
.multiple(true)
.help("List of accounts to remove while creating the snapshot"),
)
.arg(
Arg::with_name("remove_stake_accounts")
.required(false)
.long("remove-stake-accounts")
.takes_value(false)
.help("Remove all existing stake accounts from the new snapshot")
)
).subcommand(
SubCommand::with_name("accounts")
.about("Print account contents after processing in the ledger")
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg(
Arg::with_name("include_sysvars")
.long("include-sysvars")
.takes_value(false)
.help("Include sysvars too"),
)
.arg(
Arg::with_name("exclude_account_data")
.long("exclude-account-data")
.takes_value(false)
.help("Exclude account data (useful for large number of accounts)"),
)
.arg(&max_genesis_archive_unpacked_size_arg)
).subcommand(
SubCommand::with_name("capitalization")
.about("Print capitalization (aka, total supply) while checksumming it")
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(
Arg::with_name("warp_epoch")
.required(false)
.long("warp-epoch")
.takes_value(true)
.value_name("WARP_EPOCH")
.help("After loading the snapshot warp the ledger to WARP_EPOCH, \
which could be an epoch in a galaxy far far away"),
)
.arg(
Arg::with_name("inflation")
.required(false)
.long("inflation")
.takes_value(true)
.possible_values(&["pico", "full", "none"])
.help("Overwrite inflation when warping"),
)
.arg(
Arg::with_name("enable_stake_program_v2")
.required(false)
.long("enable-stake-program-v2")
.takes_value(false)
.help("Enable stake program v2 (several inflation-related staking \
bugs are feature-gated behind this)"),
)
.arg(
Arg::with_name("recalculate_capitalization")
.required(false)
.long("recalculate-capitalization")
.takes_value(false)
.help("Recalculate capitalization before warping; circumvents \
bank's out-of-sync capitalization"),
)
.arg(
Arg::with_name("csv_filename")
.long("csv-filename")
.value_name("FILENAME")
.takes_value(true)
.help("Output file in the csv format"),
)
).subcommand(
SubCommand::with_name("purge")
.about("Delete a range of slots from the ledger")
.arg(
Arg::with_name("start_slot")
.index(1)
.value_name("SLOT")
.takes_value(true)
.required(true)
.help("Start slot to purge from (inclusive)"),
)
.arg(
Arg::with_name("end_slot")
.index(2)
.value_name("SLOT")
.help("Ending slot to stop purging (inclusive) \
[default: the highest slot in the ledger]"),
)
.arg(
Arg::with_name("batch_size")
.long("batch-size")
.value_name("NUM")
.takes_value(true)
.default_value("1000")
.help("Removes at most BATCH_SIZE slots while purging in loop"),
)
.arg(
Arg::with_name("no_compaction")
.long("no-compaction")
.required(false)
.takes_value(false)
.help("Skip ledger compaction after purge")
)
.arg(
Arg::with_name("dead_slots_only")
.long("dead-slots-only")
.required(false)
.takes_value(false)
.help("Limit purging to dead slots only")
)
)
.subcommand(
SubCommand::with_name("list-roots")
.about("Output up to last <num-roots> root hashes and their \
heights starting at the given block height")
.arg(
Arg::with_name("max_height")
.long("max-height")
.value_name("NUM")
.takes_value(true)
.help("Maximum block height")
)
.arg(
Arg::with_name("start_root")
.long("start-root")
.value_name("NUM")
.takes_value(true)
.help("First root to start searching from")
)
.arg(
Arg::with_name("slot_list")
.long("slot-list")
.value_name("FILENAME")
.required(false)
.takes_value(true)
.help("The location of the output YAML file. A list of \
rollback slot heights and hashes will be written to the file")
)
.arg(
Arg::with_name("num_roots")
.long("num-roots")
.value_name("NUM")
.takes_value(true)
.default_value(DEFAULT_ROOT_COUNT)
.required(false)
.help("Number of roots in the output"),
)
)
.subcommand(
SubCommand::with_name("repair-roots")
.about("Traverses the AncestorIterator backward from a last known root \
to restore missing roots to the Root column")
.arg(
Arg::with_name("start_root")
.long("before")
.value_name("NUM")
.takes_value(true)
.help("First good root after the range to repair")
)
.arg(
Arg::with_name("end_root")
.long("until")
.value_name("NUM")
.takes_value(true)
.help("Last slot to check for root repair")
)
.arg(
Arg::with_name("max_slots")
.long("repair-limit")
.value_name("NUM")
.takes_value(true)
.default_value(DEFAULT_MAX_SLOTS_ROOT_REPAIR)
.required(true)
.help("Override the maximum number of slots to check for root repair")
)
)
.subcommand(
SubCommand::with_name("analyze-storage")
.about("Output statistics in JSON format about \
all column families in the ledger rocksdb")
)
.subcommand(
SubCommand::with_name("compute-slot-cost")
.about("runs cost_model over the block at the given slots, \
computes how expensive a block was based on cost_model")
.arg(
Arg::with_name("slots")
.index(1)
.value_name("SLOTS")
.validator(is_slot)
.multiple(true)
.takes_value(true)
.help("Slots that their blocks are computed for cost, default to all slots in ledger"),
)
)
.get_matches();
info!("{} {}", crate_name!(), solana_version::version!());
let ledger_path = PathBuf::from(value_t!(matches, "ledger_path", String).unwrap_or_else(
|_err| {
eprintln!(
"Error: Missing --ledger <DIR> argument.\n\n{}",
matches.usage()
);
exit(1);
},
));
// Canonicalize ledger path to avoid issues with symlink creation
let ledger_path = fs::canonicalize(&ledger_path).unwrap_or_else(|err| {
eprintln!(
"Unable to access ledger path '{}': {}",
ledger_path.display(),
err
);
exit(1);
});
let snapshot_archive_path = value_t!(matches, "snapshot_archive_path", String)
.ok()
.map(PathBuf::from);
let wal_recovery_mode = matches
.value_of("wal_recovery_mode")
.map(BlockstoreRecoveryMode::from);
match matches.subcommand() {
("bigtable", Some(arg_matches)) => bigtable_process_command(&ledger_path, arg_matches),
("print", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX);
let num_slots = value_t!(arg_matches, "num_slots", Slot).ok();
let allow_dead_slots = arg_matches.is_present("allow_dead_slots");
let only_rooted = arg_matches.is_present("only_rooted");
let verbose = matches.occurrences_of("verbose");
output_ledger(
open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
),
starting_slot,
ending_slot,
allow_dead_slots,
LedgerOutputMethod::Print,
num_slots,
verbose,
only_rooted,
);
}
("copy", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot);
let target_db = PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String));
let source = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
let target = open_blockstore(&target_db, AccessType::PrimaryOnly, None);
for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() {
if slot > ending_slot {
break;
}
if let Ok(shreds) = source.get_data_shreds_for_slot(slot, 0) {
if target.insert_shreds(shreds, None, true).is_err() {
warn!("error inserting shreds for slot {}", slot);
}
}
}
}
("genesis", Some(arg_matches)) => {
println!("{}", open_genesis_config_by(&ledger_path, arg_matches));
}
("genesis-hash", Some(arg_matches)) => {
println!(
"{}",
open_genesis_config_by(&ledger_path, arg_matches).hash()
);
}
("modify-genesis", Some(arg_matches)) => {
let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let output_directory = PathBuf::from(arg_matches.value_of("output_directory").unwrap());
if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") {
genesis_config.cluster_type = cluster_type;
}
if let Some(hashes_per_tick) = arg_matches.value_of("hashes_per_tick") {
genesis_config.poh_config.hashes_per_tick = match hashes_per_tick {
// Note: Unlike `solana-genesis`, "auto" is not supported here.
"sleep" => None,
_ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)),
}
}
create_new_ledger(
&output_directory,
&genesis_config,
solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
AccessType::PrimaryOnly,
)
.unwrap_or_else(|err| {
eprintln!("Failed to write genesis config: {:?}", err);
exit(1);
});
println!("{}", open_genesis_config_by(&output_directory, arg_matches));
}
("shred-version", Some(arg_matches)) => {
let process_options = ProcessOptions {
dev_halt_at_slot: Some(0),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
match load_bank_forks(
arg_matches,
&genesis_config,
&blockstore,
process_options,
snapshot_archive_path,
) {
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
println!(
"{}",
compute_shred_version(
&genesis_config.hash(),
Some(&bank_forks.working_bank().hard_forks().read().unwrap())
)
);
}
Err(err) => {
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
}
("shred-meta", Some(arg_matches)) => {
#[derive(Debug)]
struct ShredMeta<'a> {
slot: Slot,
full_slot: bool,
shred_index: usize,
data: bool,
code: bool,
last_in_slot: bool,
data_complete: bool,
shred: &'a Shred,
}
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX);
let ledger = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
for (slot, _meta) in ledger
.slot_meta_iterator(starting_slot)
.unwrap()
.take_while(|(slot, _)| *slot <= ending_slot)
{
let full_slot = ledger.is_full(slot);
if let Ok(shreds) = ledger.get_data_shreds_for_slot(slot, 0) {
for (shred_index, shred) in shreds.iter().enumerate() {
println!(
"{:#?}",
ShredMeta {
slot,
full_slot,
shred_index,
data: shred.is_data(),
code: shred.is_code(),
data_complete: shred.data_complete(),
last_in_slot: shred.last_in_slot(),
shred,
}
);
}
}
}
}
("bank-hash", Some(arg_matches)) => {
let process_options = ProcessOptions {
dev_halt_at_slot: Some(0),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
match load_bank_forks(
arg_matches,
&genesis_config,
&blockstore,
process_options,
snapshot_archive_path,
) {
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
println!("{}", &bank_forks.working_bank().hash());
}
Err(err) => {
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
}
("slot", Some(arg_matches)) => {
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
let allow_dead_slots = arg_matches.is_present("allow_dead_slots");
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
for slot in slots {
println!("Slot {}", slot);
if let Err(err) = output_slot(
&blockstore,
slot,
allow_dead_slots,
&LedgerOutputMethod::Print,
std::u64::MAX,
) {
eprintln!("{}", err);
}
}
}
("json", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let allow_dead_slots = arg_matches.is_present("allow_dead_slots");
output_ledger(
open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
),
starting_slot,
Slot::MAX,
allow_dead_slots,
LedgerOutputMethod::Json,
None,
std::u64::MAX,
true,
);
}
("dead-slots", Some(arg_matches)) => {
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() {
println!("{}", slot);
}
}
("duplicate-slots", Some(arg_matches)) => {
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() {
println!("{}", slot);
}
}
("set-dead-slot", Some(arg_matches)) => {
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
let blockstore =
open_blockstore(&ledger_path, AccessType::PrimaryOnly, wal_recovery_mode);
for slot in slots {
match blockstore.set_dead_slot(slot) {
Ok(_) => println!("Slot {} dead", slot),
Err(err) => eprintln!("Failed to set slot {} dead slot: {}", slot, err),
}
}
}
("parse_full_frozen", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot);
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
let mut ancestors = BTreeSet::new();
if blockstore.meta(ending_slot).unwrap().is_none() {
panic!("Ending slot doesn't exist");
}
for a in AncestorIterator::new(ending_slot, &blockstore) {
ancestors.insert(a);
if a <= starting_slot {
break;
}
}
println!("ancestors: {:?}", ancestors.iter());
let mut frozen = BTreeMap::new();
let mut full = BTreeMap::new();
let frozen_regex = Regex::new(r"bank frozen: (\d*)").unwrap();
let full_regex = Regex::new(r"slot (\d*) is full").unwrap();
let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String));
let f = BufReader::new(File::open(log_file).unwrap());
println!("Reading log file");
for line in f.lines().flatten() {
let parse_results = {
if let Some(slot_string) = frozen_regex.captures_iter(&line).next() {
Some((slot_string, &mut frozen))
} else {
full_regex
.captures_iter(&line)
.next()
.map(|slot_string| (slot_string, &mut full))
}
};
if let Some((slot_string, map)) = parse_results {
let slot = slot_string
.get(1)
.expect("Only one match group")
.as_str()
.parse::<u64>()
.unwrap();
if ancestors.contains(&slot) && !map.contains_key(&slot) {
map.insert(slot, line);
}
if slot == ending_slot && frozen.contains_key(&slot) && full.contains_key(&slot)
{
break;
}
}
}
for ((slot1, frozen_log), (slot2, full_log)) in frozen.iter().zip(full.iter()) {
assert_eq!(slot1, slot2);
println!(
"Slot: {}\n, full: {}\n, frozen: {}",
slot1, full_log, frozen_log
);
}
}
("verify", Some(arg_matches)) => {
let process_options = ProcessOptions {
dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: !arg_matches.is_present("skip_poh_verify"),
bpf_jit: !matches.is_present("no_bpf_jit"),
accounts_db_caching_enabled: !arg_matches.is_present("no_accounts_db_caching"),
limit_load_slot_count_from_snapshot: value_t!(
arg_matches,
"limit_load_slot_count_from_snapshot",
usize
)
.ok(),
verify_index: arg_matches.is_present("verify_accounts_index"),
allow_dead_slots: arg_matches.is_present("allow_dead_slots"),
accounts_db_test_hash_calculation: arg_matches
.is_present("accounts_db_test_hash_calculation"),
..ProcessOptions::default()
};
let print_accounts_stats = arg_matches.is_present("print_accounts_stats");
println!(
"genesis hash: {}",
open_genesis_config_by(&ledger_path, arg_matches).hash()
);
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
let (bank_forks, _, _) = load_bank_forks(
arg_matches,
&open_genesis_config_by(&ledger_path, arg_matches),
&blockstore,
process_options,
snapshot_archive_path,
)
.unwrap_or_else(|err| {
eprintln!("Ledger verification failed: {:?}", err);
exit(1);
});
if print_accounts_stats {
let working_bank = bank_forks.working_bank();
working_bank.print_accounts_stats();
}
println!("Ok");
}
("graph", Some(arg_matches)) => {
let output_file = value_t_or_exit!(arg_matches, "graph_filename", String);
let process_options = ProcessOptions {
dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default()
};
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
match load_bank_forks(
arg_matches,
&open_genesis_config_by(&ledger_path, arg_matches),
&blockstore,
process_options,
snapshot_archive_path,
) {
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
let dot = graph_forks(&bank_forks, arg_matches.is_present("include_all_votes"));
let extension = Path::new(&output_file).extension();
let result = if extension == Some(OsStr::new("pdf")) {
render_dot(dot, &output_file, "pdf")
} else if extension == Some(OsStr::new("png")) {
render_dot(dot, &output_file, "png")
} else {
File::create(&output_file)
.and_then(|mut file| file.write_all(&dot.into_bytes()))
};
match result {
Ok(_) => println!("Wrote {}", output_file),
Err(err) => eprintln!("Unable to write {}: {}", output_file, err),
}
}
Err(err) => {
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
}
("create-snapshot", Some(arg_matches)) => {
let output_directory = value_t!(arg_matches, "output_directory", PathBuf)
.unwrap_or_else(|_| ledger_path.clone());
let mut warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok();
let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts");
let new_hard_forks = hardforks_of(arg_matches, "hard_forks");
let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey");
let faucet_lamports = value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0);
let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8);
let hashes_per_tick = arg_matches.value_of("hashes_per_tick");
let bootstrap_stake_authorized_pubkey =
pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey");
let bootstrap_validator_lamports =
value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64);
let bootstrap_validator_stake_lamports =
value_t_or_exit!(arg_matches, "bootstrap_validator_stake_lamports", u64);
let minimum_stake_lamports = StakeState::get_rent_exempt_reserve(&rent);
if bootstrap_validator_stake_lamports < minimum_stake_lamports {
eprintln!(
"Error: insufficient --bootstrap-validator-stake-lamports. \
Minimum amount is {}",
minimum_stake_lamports
);
exit(1);
}
let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator");
let accounts_to_remove =
pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default();
let snapshot_version =
arg_matches
.value_of("snapshot_version")
.map_or(SnapshotVersion::default(), |s| {
s.parse::<SnapshotVersion>().unwrap_or_else(|e| {
eprintln!("Error: {}", e);
exit(1)
})
});
let maximum_snapshots_to_retain =
value_t_or_exit!(arg_matches, "maximum_snapshots_to_retain", usize);
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") {
blockstore
.rooted_slot_iterator(0)
.expect("Failed to get rooted slot iterator")
.last()
.expect("Failed to get root")
} else {
value_t_or_exit!(arg_matches, "snapshot_slot", Slot)
};
info!(
"Creating snapshot of slot {} in {}",
snapshot_slot,
output_directory.display()
);
match load_bank_forks(
arg_matches,
&genesis_config,
&blockstore,
ProcessOptions {
dev_halt_at_slot: Some(snapshot_slot),
new_hard_forks,
poh_verify: false,
..ProcessOptions::default()
},
snapshot_archive_path,
) {
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
let mut bank = bank_forks
.get(snapshot_slot)
.unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", snapshot_slot);
exit(1);
})
.clone();
let child_bank_required = rent_burn_percentage.is_ok()
|| hashes_per_tick.is_some()
|| remove_stake_accounts
|| !accounts_to_remove.is_empty()
|| faucet_pubkey.is_some()
|| bootstrap_validator_pubkeys.is_some();
if child_bank_required {
let mut child_bank =
Bank::new_from_parent(&bank, bank.collector_id(), bank.slot() + 1);
if let Ok(rent_burn_percentage) = rent_burn_percentage {
child_bank.set_rent_burn_percentage(rent_burn_percentage);
}
if let Some(hashes_per_tick) = hashes_per_tick {
child_bank.set_hashes_per_tick(match hashes_per_tick {
// Note: Unlike `solana-genesis`, "auto" is not supported here.
"sleep" => None,
_ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)),
});
}
bank = Arc::new(child_bank);
}
if let Some(faucet_pubkey) = faucet_pubkey {
bank.store_account(
&faucet_pubkey,
&AccountSharedData::new(faucet_lamports, 0, &system_program::id()),
);
}
if remove_stake_accounts {
for (address, mut account) in bank
.get_program_accounts(&stake::program::id())
.unwrap()
.into_iter()
{
account.set_lamports(0);
bank.store_account(&address, &account);
}
}
for address in accounts_to_remove {
if let Some(mut account) = bank.get_account(&address) {
account.set_lamports(0);
bank.store_account(&address, &account);
}
}
if let Some(bootstrap_validator_pubkeys) = bootstrap_validator_pubkeys {
assert_eq!(bootstrap_validator_pubkeys.len() % 3, 0);
// Ensure there are no duplicated pubkeys in the --bootstrap-validator list
{
let mut v = bootstrap_validator_pubkeys.clone();
v.sort();
v.dedup();
if v.len() != bootstrap_validator_pubkeys.len() {
eprintln!(
"Error: --bootstrap-validator pubkeys cannot be duplicated"
);
exit(1);
}
}
// Delete existing vote accounts
for (address, mut account) in bank
.get_program_accounts(&solana_vote_program::id())
.unwrap()
.into_iter()
{
account.set_lamports(0);
bank.store_account(&address, &account);
}
// Add a new identity/vote/stake account for each of the provided bootstrap
// validators
let mut bootstrap_validator_pubkeys_iter =
bootstrap_validator_pubkeys.iter();
loop {
let identity_pubkey = match bootstrap_validator_pubkeys_iter.next() {
None => break,
Some(identity_pubkey) => identity_pubkey,
};
let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap();
let stake_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap();
bank.store_account(
identity_pubkey,
&AccountSharedData::new(
bootstrap_validator_lamports,
0,
&system_program::id(),
),
);
let vote_account = vote_state::create_account_with_authorized(
identity_pubkey,
identity_pubkey,
identity_pubkey,
100,
VoteState::get_rent_exempt_reserve(&rent).max(1),
);
bank.store_account(
stake_pubkey,
&stake_state::create_account(
bootstrap_stake_authorized_pubkey
.as_ref()
.unwrap_or(identity_pubkey),
vote_pubkey,
&vote_account,
&rent,
bootstrap_validator_stake_lamports,
),
);
bank.store_account(vote_pubkey, &vote_account);
}
// Warp ahead at least two epochs to ensure that the leader schedule will be
// updated to reflect the new bootstrap validator(s)
let minimum_warp_slot =
genesis_config.epoch_schedule.get_first_slot_in_epoch(
genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2,
);
if let Some(warp_slot) = warp_slot {
if warp_slot < minimum_warp_slot {
eprintln!(
"Error: --warp-slot too close. Must be >= {}",
minimum_warp_slot
);
exit(1);
}
} else {
warn!("Warping to slot {}", minimum_warp_slot);
warp_slot = Some(minimum_warp_slot);
}
}
if child_bank_required {
while !bank.is_complete() {
bank.register_tick(&Hash::new_unique());
}
}
bank.set_capitalization();
let bank = if let Some(warp_slot) = warp_slot {
Arc::new(Bank::warp_from_parent(
&bank,
bank.collector_id(),
warp_slot,
))
} else {
bank
};
println!(
"Creating a version {} snapshot of slot {}",
snapshot_version,
bank.slot(),
);
let archive_file = snapshot_utils::bank_to_snapshot_archive(
ledger_path,
&bank,
Some(snapshot_version),
output_directory,
ArchiveFormat::TarZstd,
None,
maximum_snapshots_to_retain,
)
.unwrap_or_else(|err| {
eprintln!("Unable to create snapshot: {}", err);
exit(1);
});
println!(
"Successfully created snapshot for slot {}, hash {}: {}",
bank.slot(),
bank.hash(),
archive_file.display(),
);
println!(
"Shred version: {}",
compute_shred_version(
&genesis_config.hash(),
Some(&bank.hard_forks().read().unwrap())
)
);
}
Err(err) => {
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
}
("accounts", Some(arg_matches)) => {
let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok();
let process_options = ProcessOptions {
dev_halt_at_slot,
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let include_sysvars = arg_matches.is_present("include_sysvars");
let exclude_account_data = arg_matches.is_present("exclude_account_data");
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
match load_bank_forks(
arg_matches,
&genesis_config,
&blockstore,
process_options,
snapshot_archive_path,
) {
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
let slot = bank_forks.working_bank().slot();
let bank = bank_forks.get(slot).unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", slot);
exit(1);
});
let accounts: BTreeMap<_, _> = bank
.get_all_accounts_with_modified_slots()
.unwrap()
.into_iter()
.filter(|(pubkey, _account, _slot)| {
include_sysvars || !solana_sdk::sysvar::is_sysvar_id(pubkey)
})
.map(|(pubkey, account, slot)| (pubkey, (account, slot)))
.collect();
println!("---");
for (pubkey, (account, slot)) in accounts.into_iter() {
let data_len = account.data().len();
println!("{}:", pubkey);
println!(" - balance: {} SOL", lamports_to_sol(account.lamports()));
println!(" - owner: '{}'", account.owner());
println!(" - executable: {}", account.executable());
println!(" - slot: {}", slot);
println!(" - rent_epoch: {}", account.rent_epoch());
if !exclude_account_data {
println!(" - data: '{}'", bs58::encode(account.data()).into_string());
}
println!(" - data_len: {}", data_len);
}
}
Err(err) => {
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
}
("capitalization", Some(arg_matches)) => {
let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok();
let process_options = ProcessOptions {
dev_halt_at_slot,
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
match load_bank_forks(
arg_matches,
&genesis_config,
&blockstore,
process_options,
snapshot_archive_path,
) {
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
let slot = bank_forks.working_bank().slot();
let bank = bank_forks.get(slot).unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", slot);
exit(1);
});
if arg_matches.is_present("recalculate_capitalization") {
println!("Recalculating capitalization");
let old_capitalization = bank.set_capitalization();
if old_capitalization == bank.capitalization() {
eprintln!("Capitalization was identical: {}", Sol(old_capitalization));
}
}
if arg_matches.is_present("warp_epoch") {
let base_bank = bank;
let raw_warp_epoch = value_t!(arg_matches, "warp_epoch", String).unwrap();
let warp_epoch = if raw_warp_epoch.starts_with('+') {
base_bank.epoch() + value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
} else {
value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
};
if warp_epoch < base_bank.epoch() {
eprintln!(
"Error: can't warp epoch backwards: {} => {}",
base_bank.epoch(),
warp_epoch
);
exit(1);
}
if let Ok(raw_inflation) = value_t!(arg_matches, "inflation", String) {
let inflation = match raw_inflation.as_str() {
"pico" => Inflation::pico(),
"full" => Inflation::full(),
"none" => Inflation::new_disabled(),
_ => unreachable!(),
};
println!(
"Forcing to: {:?} (was: {:?})",
inflation,
base_bank.inflation()
);
base_bank.set_inflation(inflation);
}
let next_epoch = base_bank
.epoch_schedule()
.get_first_slot_in_epoch(warp_epoch);
// disable eager rent collection because this creates many unrelated
// rent collection account updates
base_bank
.lazy_rent_collection
.store(true, std::sync::atomic::Ordering::Relaxed);
let feature_account_balance = std::cmp::max(
genesis_config.rent.minimum_balance(Feature::size_of()),
1,
);
if arg_matches.is_present("enable_stake_program_v2") {
let mut force_enabled_count = 0;
if base_bank
.get_account(&feature_set::stake_program_v2::id())
.is_none()
{
base_bank.store_account(
&feature_set::stake_program_v2::id(),
&feature::create_account(
&Feature { activated_at: None },
feature_account_balance,
),
);
force_enabled_count += 1;
}
if base_bank
.get_account(&feature_set::rewrite_stake::id())
.is_none()
{
base_bank.store_account(
&feature_set::rewrite_stake::id(),
&feature::create_account(
&Feature { activated_at: None },
feature_account_balance,
),
);
force_enabled_count += 1;
}
if force_enabled_count == 0 {
warn!("Already stake_program_v2 is activated (or scheduled)");
}
let mut store_failed_count = 0;
if force_enabled_count >= 1 {
if base_bank
.get_account(&feature_set::spl_token_v2_multisig_fix::id())
.is_some()
{
                                    // steal some lamports from this pretty old feature so we don't
                                    // affect capitalization; this doesn't change inflation behavior!
base_bank.store_account(
&feature_set::spl_token_v2_multisig_fix::id(),
&AccountSharedData::default(),
);
force_enabled_count -= 1;
} else {
store_failed_count += 1;
}
}
if force_enabled_count >= 1 {
if base_bank
.get_account(&feature_set::instructions_sysvar_enabled::id())
.is_some()
{
                                    // steal some lamports from this pretty old feature so we don't
                                    // affect capitalization; this doesn't change inflation behavior!
base_bank.store_account(
&feature_set::instructions_sysvar_enabled::id(),
&AccountSharedData::default(),
);
force_enabled_count -= 1;
} else {
store_failed_count += 1;
}
}
assert_eq!(force_enabled_count, store_failed_count);
if store_failed_count >= 1 {
                                // we have no choice; this is maybe a locally created blank cluster
                                // with a non-Development cluster type.
let old_cap = base_bank.set_capitalization();
let new_cap = base_bank.capitalization();
warn!(
"Skewing capitalization a bit to enable stake_program_v2 as \
requested: increasing {} from {} to {}",
feature_account_balance, old_cap, new_cap,
);
assert_eq!(
old_cap + feature_account_balance * store_failed_count,
new_cap
);
}
}
#[derive(Default, Debug)]
struct PointDetail {
epoch: Epoch,
points: u128,
stake: u128,
credits: u128,
}
#[derive(Default, Debug)]
struct CalculationDetail {
epochs: usize,
voter: Pubkey,
voter_owner: Pubkey,
current_effective_stake: u64,
total_stake: u64,
rent_exempt_reserve: u64,
points: Vec<PointDetail>,
base_rewards: u64,
commission: u8,
vote_rewards: u64,
stake_rewards: u64,
activation_epoch: Epoch,
deactivation_epoch: Option<Epoch>,
point_value: Option<PointValue>,
old_credits_observed: Option<u64>,
new_credits_observed: Option<u64>,
skipped_reasons: String,
}
use solana_stake_program::stake_state::InflationPointCalculationEvent;
                        let mut stake_calculation_details: HashMap<Pubkey, CalculationDetail> =
                            HashMap::new();
let mut last_point_value = None;
let tracer = |event: &RewardCalculationEvent| {
                            // Currently the RewardCalculationEvent enum has only the Staking
                            // variant, because only staking tracing is supported!
#[allow(irrefutable_let_patterns)]
if let RewardCalculationEvent::Staking(pubkey, event) = event {
                                let detail = stake_calculation_details.entry(**pubkey).or_default();
match event {
InflationPointCalculationEvent::CalculatedPoints(
epoch,
stake,
credits,
points,
) => {
if *points > 0 {
detail.epochs += 1;
                                            detail.points.push(PointDetail { epoch: *epoch, points: *points, stake: *stake, credits: *credits });
}
}
InflationPointCalculationEvent::SplitRewards(
all,
voter,
staker,
point_value,
) => {
detail.base_rewards = *all;
detail.vote_rewards = *voter;
detail.stake_rewards = *staker;
detail.point_value = Some(point_value.clone());
                                        // `PointValue` is duplicated across events so we can detect
                                        // possible miscalculation; do a minimal sanity check
let point_value = detail.point_value.clone();
if point_value.is_some() {
if last_point_value.is_some() {
                                                assert_eq!(last_point_value, point_value);
}
last_point_value = point_value;
}
}
InflationPointCalculationEvent::EffectiveStakeAtRewardedEpoch(stake) => {
detail.current_effective_stake = *stake;
}
InflationPointCalculationEvent::Commission(commission) => {
detail.commission = *commission;
}
InflationPointCalculationEvent::RentExemptReserve(reserve) => {
detail.rent_exempt_reserve = *reserve;
}
InflationPointCalculationEvent::CreditsObserved(
old_credits_observed,
new_credits_observed,
) => {
detail.old_credits_observed = Some(*old_credits_observed);
detail.new_credits_observed = *new_credits_observed;
}
InflationPointCalculationEvent::Delegation(
delegation,
owner,
) => {
detail.voter = delegation.voter_pubkey;
detail.voter_owner = *owner;
detail.total_stake = delegation.stake;
detail.activation_epoch = delegation.activation_epoch;
if delegation.deactivation_epoch < Epoch::max_value() {
detail.deactivation_epoch =
Some(delegation.deactivation_epoch);
}
}
InflationPointCalculationEvent::Skipped(skipped_reason) => {
if detail.skipped_reasons.is_empty() {
detail.skipped_reasons = format!("{:?}", skipped_reason);
} else {
detail.skipped_reasons += &format!("/{:?}", skipped_reason);
}
}
}
}
};
let warped_bank = Bank::new_from_parent_with_tracer(
base_bank,
base_bank.collector_id(),
next_epoch,
tracer,
);
warped_bank.freeze();
let mut csv_writer = if arg_matches.is_present("csv_filename") {
let csv_filename =
value_t_or_exit!(arg_matches, "csv_filename", String);
let file = File::create(&csv_filename).unwrap();
Some(csv::WriterBuilder::new().from_writer(file))
} else {
None
};
println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot());
println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch());
assert_capitalization(base_bank);
assert_capitalization(&warped_bank);
let interest_per_epoch = ((warped_bank.capitalization() as f64)
/ (base_bank.capitalization() as f64)
* 100_f64)
- 100_f64;
let interest_per_year = interest_per_epoch
/ warped_bank.epoch_duration_in_years(base_bank.epoch());
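                        // Worked example of the formula above (illustrative numbers): if
                        // capitalization grows from 500 to 505 across the warp,
                        // interest_per_epoch = (505/500)*100 - 100 = 1%, which is then
                        // annualized by dividing by the epoch's duration in years.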
println!(
"Capitalization: {} => {} (+{} {}%; annualized {}%)",
Sol(base_bank.capitalization()),
Sol(warped_bank.capitalization()),
Sol(warped_bank.capitalization() - base_bank.capitalization()),
interest_per_epoch,
interest_per_year,
);
let mut overall_delta = 0;
let modified_accounts =
warped_bank.get_all_accounts_modified_since_parent();
let mut rewarded_accounts = modified_accounts
.iter()
.map(|(pubkey, account)| {
(
pubkey,
account,
base_bank
.get_account(pubkey)
.map(|a| a.lamports())
.unwrap_or_default(),
)
})
.collect::<Vec<_>>();
rewarded_accounts.sort_unstable_by_key(
|(pubkey, account, base_lamports)| {
(
*account.owner(),
*base_lamports,
account.lamports() - base_lamports,
*pubkey,
)
},
);
                        let mut unchanged_accounts = stake_calculation_details
.keys()
.collect::<HashSet<_>>()
.difference(
&rewarded_accounts
.iter()
.map(|(pubkey, ..)| *pubkey)
.collect(),
)
.map(|pubkey| (**pubkey, warped_bank.get_account(pubkey).unwrap()))
.collect::<Vec<_>>();
unchanged_accounts.sort_unstable_by_key(|(pubkey, account)| {
(*account.owner(), account.lamports(), *pubkey)
});
let unchanged_accounts = unchanged_accounts.into_iter();
let rewarded_accounts = rewarded_accounts
.into_iter()
.map(|(pubkey, account, ..)| (*pubkey, account.clone()));
let all_accounts = unchanged_accounts.chain(rewarded_accounts);
for (pubkey, warped_account) in all_accounts {
                            // Don't output sysvars; they're always updated but not related to
                            // inflation.
if solana_sdk::sysvar::is_sysvar_id(&pubkey) {
continue;
}
if let Some(base_account) = base_bank.get_account(&pubkey) {
let delta = warped_account.lamports() - base_account.lamports();
                                let detail = stake_calculation_details.get(&pubkey);
println!(
"{:<45}({}): {} => {} (+{} {:>4.9}%) {:?}",
format!("{}", pubkey), // format! is needed to pad/justify correctly.
base_account.owner(),
Sol(base_account.lamports()),
Sol(warped_account.lamports()),
Sol(delta),
((warped_account.lamports() as f64)
/ (base_account.lamports() as f64)
* 100_f64)
- 100_f64,
detail,
);
if let Some(ref mut csv_writer) = csv_writer {
#[derive(Serialize)]
struct InflationRecord {
cluster_type: String,
rewarded_epoch: Epoch,
account: String,
owner: String,
old_balance: u64,
new_balance: u64,
data_size: usize,
delegation: String,
delegation_owner: String,
effective_stake: String,
delegated_stake: String,
rent_exempt_reserve: String,
activation_epoch: String,
deactivation_epoch: String,
earned_epochs: String,
epoch: String,
epoch_credits: String,
epoch_points: String,
epoch_stake: String,
old_credits_observed: String,
new_credits_observed: String,
base_rewards: String,
stake_rewards: String,
vote_rewards: String,
commission: String,
cluster_rewards: String,
cluster_points: String,
old_capitalization: u64,
new_capitalization: u64,
}
fn format_or_na<T: std::fmt::Display>(
data: Option<T>,
) -> String {
data.map(|data| format!("{}", data))
.unwrap_or_else(|| "N/A".to_owned())
}
let mut point_details = detail
.map(|d| d.points.iter().map(Some).collect::<Vec<_>>())
.unwrap_or_default();
                                    // ensure we print a row even if there is no calculation/point detail
if point_details.is_empty() {
point_details.push(None);
}
for point_detail in point_details {
let record = InflationRecord {
cluster_type: format!("{:?}", base_bank.cluster_type()),
rewarded_epoch: base_bank.epoch(),
account: format!("{}", pubkey),
owner: format!("{}", base_account.owner()),
old_balance: base_account.lamports(),
new_balance: warped_account.lamports(),
data_size: base_account.data().len(),
delegation: format_or_na(detail.map(|d| d.voter)),
delegation_owner: format_or_na(
detail.map(|d| d.voter_owner),
),
effective_stake: format_or_na(
detail.map(|d| d.current_effective_stake),
),
delegated_stake: format_or_na(
detail.map(|d| d.total_stake),
),
rent_exempt_reserve: format_or_na(
detail.map(|d| d.rent_exempt_reserve),
),
activation_epoch: format_or_na(detail.map(|d| {
if d.activation_epoch < Epoch::max_value() {
d.activation_epoch
} else {
                                                    // bootstrapped
0
}
})),
deactivation_epoch: format_or_na(
detail.and_then(|d| d.deactivation_epoch),
),
earned_epochs: format_or_na(detail.map(|d| d.epochs)),
epoch: format_or_na(point_detail.map(|d| d.epoch)),
epoch_credits: format_or_na(
point_detail.map(|d| d.credits),
),
epoch_points: format_or_na(
point_detail.map(|d| d.points),
),
epoch_stake: format_or_na(
point_detail.map(|d| d.stake),
),
old_credits_observed: format_or_na(
detail.and_then(|d| d.old_credits_observed),
),
new_credits_observed: format_or_na(
detail.and_then(|d| d.new_credits_observed),
),
base_rewards: format_or_na(
detail.map(|d| d.base_rewards),
),
stake_rewards: format_or_na(
detail.map(|d| d.stake_rewards),
),
vote_rewards: format_or_na(
detail.map(|d| d.vote_rewards),
),
commission: format_or_na(detail.map(|d| d.commission)),
cluster_rewards: format_or_na(
last_point_value.as_ref().map(|pv| pv.rewards),
),
cluster_points: format_or_na(
last_point_value.as_ref().map(|pv| pv.points),
),
old_capitalization: base_bank.capitalization(),
new_capitalization: warped_bank.capitalization(),
};
csv_writer.serialize(&record).unwrap();
}
}
overall_delta += delta;
} else {
error!("new account!?: {}", pubkey);
}
}
if overall_delta > 0 {
println!("Sum of lamports changes: {}", Sol(overall_delta));
}
} else {
if arg_matches.is_present("recalculate_capitalization") {
eprintln!("Capitalization isn't verified because it's recalculated");
}
if arg_matches.is_present("inflation") {
eprintln!(
"Forcing inflation isn't meaningful because bank isn't warping"
);
}
assert_capitalization(bank);
println!("Inflation: {:?}", bank.inflation());
println!("RentCollector: {:?}", bank.rent_collector());
println!("Capitalization: {}", Sol(bank.capitalization()));
}
}
Err(err) => {
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
}
("purge", Some(arg_matches)) => {
let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot);
let end_slot = value_t!(arg_matches, "end_slot", Slot).ok();
let no_compaction = arg_matches.is_present("no_compaction");
let dead_slots_only = arg_matches.is_present("dead_slots_only");
let batch_size = value_t_or_exit!(arg_matches, "batch_size", usize);
let access_type = if !no_compaction {
AccessType::PrimaryOnly
} else {
AccessType::PrimaryOnlyForMaintenance
};
let blockstore = open_blockstore(&ledger_path, access_type, wal_recovery_mode);
let end_slot = match end_slot {
Some(end_slot) => end_slot,
None => match blockstore.slot_meta_iterator(start_slot) {
Ok(metas) => {
let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
if slots.is_empty() {
eprintln!("Purge range is empty");
exit(1);
}
*slots.last().unwrap()
}
Err(err) => {
eprintln!("Unable to read the Ledger: {:?}", err);
exit(1);
}
},
};
if end_slot < start_slot {
eprintln!(
"end slot {} is less than start slot {}",
end_slot, start_slot
);
exit(1);
}
info!(
"Purging data from slots {} to {} ({} slots) (skip compaction: {}) (dead slot only: {})",
start_slot,
end_slot,
end_slot - start_slot,
no_compaction,
dead_slots_only,
);
let purge_from_blockstore = |start_slot, end_slot| {
blockstore.purge_from_next_slots(start_slot, end_slot);
if no_compaction {
blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact);
} else {
blockstore.purge_and_compact_slots(start_slot, end_slot);
}
};
if !dead_slots_only {
let slots_iter = &(start_slot..=end_slot).chunks(batch_size);
for slots in slots_iter {
let slots = slots.collect::<Vec<_>>();
assert!(!slots.is_empty());
let start_slot = *slots.first().unwrap();
let end_slot = *slots.last().unwrap();
info!(
"Purging chunked slots from {} to {} ({} slots)",
start_slot,
end_slot,
end_slot - start_slot
);
purge_from_blockstore(start_slot, end_slot);
}
} else {
let dead_slots_iter = blockstore
.dead_slots_iterator(start_slot)
.unwrap()
.take_while(|s| *s <= end_slot);
for dead_slot in dead_slots_iter {
info!("Purging dead slot {}", dead_slot);
purge_from_blockstore(dead_slot, dead_slot);
}
}
}
("list-roots", Some(arg_matches)) => {
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
let max_height = if let Some(height) = arg_matches.value_of("max_height") {
usize::from_str(height).expect("Maximum height must be a number")
} else {
usize::MAX
};
let start_root = if let Some(height) = arg_matches.value_of("start_root") {
Slot::from_str(height).expect("Starting root must be a number")
} else {
0
};
let num_roots = if let Some(roots) = arg_matches.value_of("num_roots") {
usize::from_str(roots).expect("Number of roots must be a number")
} else {
usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
};
let iter = blockstore
.rooted_slot_iterator(start_root)
.expect("Failed to get rooted slot");
let mut slot_hash = Vec::new();
for (i, slot) in iter.into_iter().enumerate() {
if i > num_roots {
break;
}
if slot <= max_height as u64 {
let blockhash = blockstore
.get_slot_entries(slot, 0)
.unwrap()
.last()
.unwrap()
.hash;
slot_hash.push((slot, blockhash));
} else {
break;
}
}
let mut output_file: Box<dyn Write> =
if let Some(path) = arg_matches.value_of("slot_list") {
match File::create(path) {
Ok(file) => Box::new(file),
_ => Box::new(stdout()),
}
} else {
Box::new(stdout())
};
slot_hash
.into_iter()
.rev()
.enumerate()
.for_each(|(i, (slot, hash))| {
if i < num_roots {
output_file
.write_all(format!("{:?}: {:?}\n", slot, hash).as_bytes())
.expect("failed to write");
}
});
}
("repair-roots", Some(arg_matches)) => {
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
let start_root = if let Some(root) = arg_matches.value_of("start_root") {
Slot::from_str(root).expect("Before root must be a number")
} else {
blockstore.max_root()
};
let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64);
let end_root = if let Some(root) = arg_matches.value_of("end_root") {
Slot::from_str(root).expect("Until root must be a number")
} else {
start_root.saturating_sub(max_slots)
};
assert!(start_root > end_root);
assert!(blockstore.is_root(start_root));
let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked
if arg_matches.is_present("end_root") && num_slots > max_slots {
eprintln!(
"Requested range {} too large, max {}. \
Either adjust `--until` value, or pass a larger `--repair-limit` \
to override the limit",
num_slots, max_slots,
);
exit(1);
}
let ancestor_iterator =
AncestorIterator::new(start_root, &blockstore).take_while(|&slot| slot >= end_root);
let roots_to_fix: Vec<_> = ancestor_iterator
.filter(|slot| !blockstore.is_root(*slot))
.collect();
if !roots_to_fix.is_empty() {
eprintln!("{} slots to be rooted", roots_to_fix.len());
for chunk in roots_to_fix.chunks(100) {
eprintln!("{:?}", chunk);
blockstore
                        .set_roots(chunk.iter())
                        .unwrap_or_else(|err| {
                            eprintln!("Unable to set roots {:?}: {}", chunk, err);
exit(1);
});
}
} else {
println!(
"No missing roots found in range {} to {}",
end_root, start_root
);
}
}
("bounds", Some(arg_matches)) => {
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
match blockstore.slot_meta_iterator(0) {
Ok(metas) => {
let all = arg_matches.is_present("all");
let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
if slots.is_empty() {
println!("Ledger is empty");
} else {
let first = slots.first().unwrap();
let last = slots.last().unwrap_or(first);
if first != last {
println!(
"Ledger has data for {} slots {:?} to {:?}",
slots.len(),
first,
last
);
if all {
println!("Non-empty slots: {:?}", slots);
}
} else {
println!("Ledger has data for slot {:?}", first);
}
}
if let Ok(rooted) = blockstore.rooted_slot_iterator(0) {
let mut first_rooted = 0;
let mut last_rooted = 0;
let mut total_rooted = 0;
for (i, slot) in rooted.into_iter().enumerate() {
if i == 0 {
first_rooted = slot;
}
last_rooted = slot;
total_rooted += 1;
}
let mut count_past_root = 0;
for slot in slots.iter().rev() {
if *slot > last_rooted {
count_past_root += 1;
} else {
break;
}
}
println!(
" with {} rooted slots from {:?} to {:?}",
total_rooted, first_rooted, last_rooted
);
println!(" and {} slots past the last root", count_past_root);
} else {
println!(" with no rooted slots");
}
}
Err(err) => {
eprintln!("Unable to read the Ledger: {:?}", err);
exit(1);
}
};
}
("analyze-storage", _) => {
analyze_storage(&open_database(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
));
println!("Ok.");
}
("compute-slot-cost", Some(arg_matches)) => {
let blockstore = open_blockstore(
&ledger_path,
AccessType::TryPrimaryThenSecondary,
wal_recovery_mode,
);
let mut slots: Vec<u64> = vec![];
if !arg_matches.is_present("slots") {
if let Ok(metas) = blockstore.slot_meta_iterator(0) {
slots = metas.map(|(slot, _)| slot).collect();
}
} else {
slots = values_t_or_exit!(arg_matches, "slots", Slot);
}
for slot in slots {
if let Err(err) = compute_slot_cost(&blockstore, slot) {
eprintln!("{}", err);
}
}
}
("", _) => {
eprintln!("{}", matches.usage());
exit(1);
}
_ => unreachable!(),
};
}
|
_bank_forks(
|
enum.ts
|
FOLDER = 'folder',
DB = 'db',
}
|
export type FileTokenType = 'fileAccess' | 'fileAccessLink';
export enum FileStorage {
|
|
wallet.go
|
/*
* Flow Emulator
*
* Copyright 2019-2022 Dapper Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package server
import (
"archive/zip"
"bytes"
"context"
"embed"
"errors"
"fmt"
"mime"
"net/http"
"path/filepath"
"strings"
)
var (
//go:embed devWallet
devWallet embed.FS
)
const (
ApiPath = "/api/"
)
type WalletServer struct {
httpServer *http.Server
zipFS *zip.Reader
}
func NewWalletServer(
config WalletConfig,
port int,
headers []HTTPHeader,
) *WalletServer
|
func (m WalletServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
upath := r.URL.Path
if !strings.HasPrefix(upath, "/") {
upath = "/" + upath
r.URL.Path = upath
}
if strings.HasSuffix(upath, "/") {
upath = "/index.html"
r.URL.Path = upath
}
file, err := m.zipFS.Open(upath[1:])
if err != nil {
		// try with .html suffix
upath = upath + ".html"
file, err = m.zipFS.Open(upath[1:])
if err != nil {
w.WriteHeader(500)
return
}
}
	// detect mime type
extension := filepath.Ext(upath)
mimeType := mime.TypeByExtension("." + extension)
if mimeType != "" {
w.Header().Add("Content-Type", mimeType)
}
fileStat, _ := file.Stat()
target := fileStat.Size()
var buffer []byte = make([]byte, 32768)
for target > 0 {
count, _ := file.Read(buffer)
_, err := w.Write(buffer[:count])
if err != nil {
return
}
target = target - int64(count)
}
}
func (h *WalletServer) Start() error {
err := h.httpServer.ListenAndServe()
if errors.Is(err, http.ErrServerClosed) {
return nil
}
return err
}
func (h *WalletServer) Stop() {
_ = h.httpServer.Shutdown(context.Background())
}
|
{
mux := http.NewServeMux()
httpServer := &http.Server{
Addr: fmt.Sprintf(":%d", port),
Handler: mux,
}
zipContent, _ := devWallet.ReadFile("devWallet/html.zip")
zipFS, _ := zip.NewReader(bytes.NewReader(zipContent), int64(len(zipContent)))
	me := &WalletServer{
		httpServer: httpServer,
		zipFS:      zipFS,
	}
	mux.Handle("/", me)
	// API handler
	mux.Handle(ApiPath, NewWalletApiServer(config))
	// return the handler itself so the returned server also carries zipFS
	return me
}
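// Usage sketch (illustrative; config value and port are assumptions, not from
// the original file):
//
//	srv := NewWalletServer(config, 8701, nil)
//	go func() { _ = srv.Start() }()
//	defer srv.Stop()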
|
extension.ts
|
import { LanguageClient, LanguageClientOptions, ServerOptions } from 'vscode-languageclient/node';
import { Trace } from 'vscode-jsonrpc';
import { ExtensionContext, workspace, commands, window, StatusBarItem, StatusBarAlignment, Terminal, Uri, Position } from 'vscode';
import * as fs from "fs";
import { TopModelConfig, TopModelException } from './types';
import { registerPreview } from './preview';
const open = require('open');
const exec = require('child_process').exec;
const yaml = require("js-yaml");
const SERVER_EXE = 'dotnet';
export const COMMANDS = {
update: "topmodel.modgen.update",
install: "topmodel.modgen.install",
modgen: "topmodel.modgen",
modgenWatch: "topmodel.modgen.watch",
preview: "topmodel.preview",
findRef: "topmodel.findRef"
};
let NEXT_TERM_ID = 1;
let currentTerminal: Terminal;
let lsStarted = false;
let topModelStatusBar: StatusBarItem;
export function activate(context: ExtensionContext) {
if (!lsStarted) {
createStatusBar();
checkInstall();
findConfFile().then((conf) => {
const config = conf.config;
const configPath = conf.file.path;
startLanguageServer(context, configPath, config);
registerCommands(context, configPath);
}, error => {
handleError(error);
});
}
}
function createStatusBar() {
topModelStatusBar = window.createStatusBarItem(StatusBarAlignment.Right, 100);
topModelStatusBar.text = '$(loading~spin) Topmodel';
topModelStatusBar.tooltip = 'Topmodel is loading configuration';
topModelStatusBar.show();
}
function execute(command: string, callback: Function) {
exec(command, function (error: string, stdout: string, stderr: string) { callback(stdout); });
}
/********************************************************* */
/*********************** CHECKS ************************** */
/********************************************************* */
function checkInstall() {
execute('echo ;%PATH%; | find /C /I "dotnet"', async (dotnetIsInstalled: string) => {
if (dotnetIsInstalled !== '1\r\n') {
const selection = await window.showInformationMessage('Dotnet is not installed', "Show download page");
if (selection === "Show download page") {
open("https://dotnet.microsoft.com/download/dotnet/6.0");
}
} else {
            checkTopModelInstall();
}
});
}
function checkTopModelInstall() {
execute('dotnet tool list -g | find /C /I "topmodel"', async (result: string) => {
if (result !== '1\r\n') {
const option = "Install TopModel";
            const selection = await window.showInformationMessage('TopModel is not installed', option);
if (selection === option) {
commands.executeCommand(COMMANDS.install);
}
} else {
checkTopModelUpdate();
}
});
}
async function checkTopModelUpdate() {
const https = require('https');
const options = {
hostname: 'api.nuget.org',
port: 443,
path: '/v3-flatcontainer/TopModel.Generator/index.json',
method: 'GET'
};
const req = https.request(options, (res: any) => {
        res.on('data', (response: string) => {
            const { versions }: { versions: string[] } = JSON.parse(response);
const latest = versions[versions.length - 1];
execute(`modgen --version`, async (result: string) => {
const currentVersion = result.replace('\r\n', '');
if (currentVersion !== latest) {
const option = "Update TopModel";
                    const selection = await window.showInformationMessage('TopModel can be updated', option);
if (selection === option) {
commands.executeCommand(COMMANDS.update);
}
}
});
});
});
req.on('error', (error: any) => {
console.error(error);
});
req.end();
}
/********************************************************* */
/********************* COMMANDS ************************** */
/********************************************************* */
function installModgen() {
const terminal = getTerminal();
terminal.sendText("dotnet tool install --global TopModel.Generator");
terminal.show();
}
function updateModgen() {
const terminal = getTerminal();
terminal.sendText("dotnet tool update --global TopModel.Generator");
terminal.show();
open("https://github.com/klee-contrib/topmodel/blob/develop/CHANGELOG.md");
}
function startModgen(watch: boolean, configPath: string) {
const terminal = getTerminal();
terminal.sendText(
`modgen ${configPath}` + (watch ? " --watch" : "")
);
}
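// Illustrative usage (the path is an assumption): startModgen(true, "./topmodel.config")
// sends `modgen ./topmodel.config --watch` to the shared TopModel terminal.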
function getTerminal() {
if (!currentTerminal || !window.terminals.includes(currentTerminal)) {
currentTerminal = window.createTerminal({
name: `Topmodel : #${NEXT_TERM_ID++}`,
message: "Starting modgen in a new terminal"
});
}
return currentTerminal;
}
function registerCommands(context: ExtensionContext, configPath: string) {
const modgen = commands.registerCommand(
COMMANDS.modgen,
() => {
startModgen(false, configPath);
}
);
const modgenWatch =
commands.registerCommand(COMMANDS.modgenWatch,
() => {
startModgen(true, configPath);
}
);
const modgenInstall = commands.registerCommand(COMMANDS.install, () => installModgen());
const modgenUpdate = commands.registerCommand(COMMANDS.update, () => updateModgen());
context.subscriptions.push(modgenInstall, modgenUpdate, modgen, modgenWatch);
commands.registerCommand(COMMANDS.findRef, async (line: number) => {
await commands.executeCommand("editor.action.goToLocations", window.activeTextEditor!.document.uri, new Position(line, 0), []);
await commands.executeCommand("editor.action.goToReferences");
});
return NEXT_TERM_ID;
}
async function findConfFile(): Promise<{ config: TopModelConfig, file: Uri }> {
const files = await workspace.findFiles("**/topmodel*.config");
let configs: { config: TopModelConfig, file: Uri }[] = files.map((file) => {
const doc = fs.readFileSync(file.path.substring(1), "utf8");
const c = doc
.split("---")
.filter(e => e)
.map(yaml.load)
.map(e => e as TopModelConfig)
.filter(e => e.app)
[0];
return { config: c, file };
});
if (configs.length > 1) {
throw new TopModelException("Plusieurs fichiers de configuration trouvés. L'extension n'a pas démarré (coming soon)");
} else if (configs.length === 0) {
throw new TopModelException("Topmodel a démarré car un fichier de configuration se trouvait dans votre workspace, mais il est désormais introuvable.");
}
return configs[0];
}
|
function startLanguageServer(context: ExtensionContext, configPath: string, config: TopModelConfig) {
    // The language server is a dotnet assembly shipped with the extension
const args = [context.asAbsolutePath("./language-server/TopModel.LanguageServer.dll")];
let configRelativePath = workspace.asRelativePath(configPath);
if ((workspace.workspaceFolders?.length || 0) > 1) {
configRelativePath = configRelativePath.split("/").splice(1).join('/');
}
args.push(configPath.substring(1));
let serverOptions: ServerOptions = {
run: { command: SERVER_EXE, args },
debug: { command: SERVER_EXE, args }
};
let configFolderA = configRelativePath.split("/");
configFolderA.pop();
const configFolder = configFolderA.join('/');
let modelRoot = config.modelRoot || configFolder;
// Options to control the language client
let clientOptions: LanguageClientOptions = {
// Register the server for plain text documents
        documentSelector: [{ pattern: `${modelRoot}**/*.tmd` }],
synchronize: {
configurationSection: 'topmodel',
fileEvents: workspace.createFileSystemWatcher(`${modelRoot}**/*.tmd`)
},
};
// Create the language client and start the client.
const client = new LanguageClient('topmodel', 'TopModel', serverOptions, clientOptions);
client.trace = Trace.Verbose;
let disposable = client.start();
client.onReady().then(() => {
handleLsReady(config, context);
registerPreview(context, client);
});
// Push the disposable to the context's subscriptions so that the
// client can be deactivated on extension deactivation
context.subscriptions.push(disposable);
}
function handleLsReady(config: TopModelConfig, context: ExtensionContext): void {
topModelStatusBar.text = "$(check-all) TopModel";
topModelStatusBar.tooltip = "TopModel is running for app " + config.app;
topModelStatusBar.command = "extension.topmodel";
context.subscriptions.push(topModelStatusBar);
lsStarted = true;
}
function handleError(exception: TopModelException) {
window.showErrorMessage(exception.message);
topModelStatusBar.text = "$(diff-review-close) TopModel";
topModelStatusBar.tooltip = "TopModel is not running";
}
| |
basic.go
|
package controllers
type Response struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data interface{} `json:"data"`
}
func (c *ApiController) Response(code int, args ...interface{}) {
var msg string
var data interface{}
switch len(args) {
case 2:
data = args[1]
fallthrough
case 1:
msg, _ = args[0].(string)
}
c.Data["json"] = Response{code, msg, data}
err := c.ServeJSON()
if err != nil
|
}
func (c *ApiController) HasParam(key string) (string, bool) {
param := c.Ctx.Request.Form.Get(key)
if len(param) == 0 {
c.Response(-1, "Miss param: " + key)
return param, false
}
return param, true
}
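// Usage sketch (illustrative, not from the original file): the variadic args
// map to (msg) or (msg, data) depending on arity.
//
//	c.Response(0, "ok", result)         // code, msg, data
//	c.Response(-1, "Missing param: id") // code, msg only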
func (c *ApiController) CheckPostBody(keys ...string) bool {
for _, key := range keys {
if _, has := c.HasParam(key); !has {
return false
}
}
return true
}
|
{
panic(err)
}
|
data.rs
|
//! Module contains code for parsing and manipulating event data.
use crate::errors::ExecutionError;
use ethcontract_common::abi::{Event as AbiEvent, RawLog as AbiRawLog};
use web3::contract::tokens::Detokenize;
use web3::types::{Log, H256};
/// A contract event
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Event<T> {
/// The decoded log data.
pub data: T,
/// The additional metadata for the event. Note that this is not always
/// available if these logs are pending. This can happen if the `to_block`
/// option was set to `BlockNumber::Pending`.
pub meta: Option<EventMetadata>,
}
/// A contract event from an event stream.
///
/// This is similar to `Event`s except the event may either be added (in case a
/// new block is mined) or removed (in case of re-orgs when blocks are removed).
pub type StreamEvent<T> = Event<EventStatus<T>>;
/// A type representing a contract event that was either added or removed. Note
/// that this type is intentionally an enum so that the handling of removed
/// events is made more explicit.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum EventStatus<T> {
/// A new event was received.
Added(T),
/// A previously mined event was removed as a result of a re-org.
Removed(T),
}
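// Illustrative sketch (not part of the original API): matching on a stream
// event's status, assuming `event: StreamEvent<Transfer>`:
//
//     match event.data {
//         EventStatus::Added(transfer) => { /* apply the new transfer */ }
//         EventStatus::Removed(transfer) => { /* roll it back after a re-org */ }
//     }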
impl<T> Event<T> {
/// Creates an event from a log given a mapping function.
pub(crate) fn from_past_log<E, F>(log: Log, f: F) -> Result<Self, ExecutionError>
where
F: FnOnce(RawLog) -> Result<T, E>,
ExecutionError: From<E>,
{
if log.removed == Some(true) {
return Err(ExecutionError::RemovedLog(Box::new(log)));
}
let meta = EventMetadata::from_log(&log);
let raw = RawLog::from(log);
let data = f(raw)?;
Ok(Event { data, meta })
}
}
impl<T> Event<EventStatus<T>> {
/// Creates an event from a log given a mapping function.
pub(crate) fn from_streamed_log<E, F>(log: Log, f: F) -> Result<Self, ExecutionError>
where
F: FnOnce(RawLog) -> Result<T, E>,
ExecutionError: From<E>,
{
let removed = log.removed == Some(true);
let meta = EventMetadata::from_log(&log);
let raw = RawLog::from(log);
let inner_data = f(raw)?;
let data = if removed {
EventStatus::Removed(inner_data)
} else {
EventStatus::Added(inner_data)
};
Ok(Event { data, meta })
}
    /// Get a reference to the underlying event data regardless of whether the
    /// event was added or removed.
pub fn
|
(&self) -> &T {
match &self.data {
EventStatus::Added(value) => value,
EventStatus::Removed(value) => value,
}
}
/// Gets a bool representing if the event was added.
pub fn is_added(&self) -> bool {
matches!(&self.data, EventStatus::Added(_))
}
/// Gets a bool representing if the event was removed.
pub fn is_removed(&self) -> bool {
matches!(&self.data, EventStatus::Removed(_))
}
/// Get the underlying event data if the event was added, `None` otherwise.
pub fn added(self) -> Option<T> {
match self.data {
EventStatus::Added(value) => Some(value),
EventStatus::Removed(_) => None,
}
}
/// Get the underlying event data if the event was removed, `None`
/// otherwise.
pub fn removed(self) -> Option<T> {
match self.data {
EventStatus::Removed(value) => Some(value),
EventStatus::Added(_) => None,
}
}
/// Maps the inner data of an event into some other data.
pub fn map<U, F>(self, f: F) -> StreamEvent<U>
where
F: FnOnce(T) -> U,
{
Event {
data: match self.data {
EventStatus::Added(inner) => EventStatus::Added(f(inner)),
EventStatus::Removed(inner) => EventStatus::Removed(f(inner)),
},
meta: self.meta,
}
}
}
/// Additional metadata from the log for the event.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct EventMetadata {
/// The hash of the block where the log was produced.
pub block_hash: H256,
/// The number of the block where the log was produced.
pub block_number: u64,
/// The hash of the transaction this log belongs to.
pub transaction_hash: H256,
/// The block index of the transaction this log belongs to.
pub transaction_index: usize,
/// The index of the log in the block.
pub log_index: usize,
/// The log index in the transaction this log belongs to. This property is
/// non-standard.
pub transaction_log_index: Option<usize>,
/// The log type. Note that this property is non-standard but is supported
/// by Parity nodes.
pub log_type: Option<String>,
}
impl EventMetadata {
fn from_log(log: &Log) -> Option<Self> {
Some(EventMetadata {
block_hash: log.block_hash?,
block_number: log.block_number?.as_u64(),
transaction_hash: log.transaction_hash?,
transaction_index: log.transaction_index?.as_usize(),
log_index: log.log_index?.as_usize(),
transaction_log_index: log.transaction_log_index.map(|index| index.as_usize()),
log_type: log.log_type.clone(),
})
}
}
/// Raw log topics and data for a contract event.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct RawLog {
/// The raw 32-byte topics.
pub topics: Vec<H256>,
/// The raw non-indexed data attached to an event.
pub data: Vec<u8>,
}
impl RawLog {
/// Decode raw log data into a tokenizable for a matching event ABI entry.
pub fn decode<D>(self, event: &AbiEvent) -> Result<D, ExecutionError>
where
D: Detokenize,
{
let event_log = event.parse_log(AbiRawLog {
topics: self.topics,
data: self.data,
})?;
let tokens = event_log
.params
.into_iter()
.map(|param| param.value)
.collect::<Vec<_>>();
let data = D::from_tokens(tokens)?;
Ok(data)
}
}
impl From<Log> for RawLog {
fn from(log: Log) -> Self {
RawLog {
topics: log.topics,
data: log.data.0,
}
}
}
/// Trait for parsing a transaction log into some event data when the
/// expected event type is not known.
pub trait ParseLog: Sized {
/// Create a new instance by parsing raw log data.
fn parse_log(log: RawLog) -> Result<Self, ExecutionError>;
}
impl ParseLog for RawLog {
fn parse_log(log: RawLog) -> Result<Self, ExecutionError> {
Ok(log)
}
}
|
inner_data
|
mainThreadNotebookDocuments.ts
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { DisposableStore, dispose } from 'vs/base/common/lifecycle';
import { ResourceMap } from 'vs/base/common/map';
import { URI, UriComponents } from 'vs/base/common/uri';
import { BoundModelReferenceCollection } from 'vs/workbench/api/browser/mainThreadDocuments';
import { NotebookCellTextModel } from 'vs/workbench/contrib/notebook/common/model/notebookCellTextModel';
import { NotebookTextModel } from 'vs/workbench/contrib/notebook/common/model/notebookTextModel';
import { NotebookCellsChangeType } from 'vs/workbench/contrib/notebook/common/notebookCommon';
import { INotebookEditorModelResolverService } from 'vs/workbench/contrib/notebook/common/notebookEditorModelResolverService';
import { IUriIdentityService } from 'vs/workbench/services/uriIdentity/common/uriIdentity';
import { ExtHostContext, ExtHostNotebookDocumentsShape, IExtHostContext, MainThreadNotebookDocumentsShape, NotebookCellDto, NotebookCellsChangedEventDto, NotebookDataDto } from '../common/extHost.protocol';
import { MainThreadNotebooksAndEditors } from 'vs/workbench/api/browser/mainThreadNotebookDocumentsAndEditors';
import { NotebookDto } from 'vs/workbench/api/browser/mainThreadNotebookDto';
export class MainThreadNotebookDocuments implements MainThreadNotebookDocumentsShape {
private readonly _disposables = new DisposableStore();
private readonly _proxy: ExtHostNotebookDocumentsShape;
private readonly _documentEventListenersMapping = new ResourceMap<DisposableStore>();
private readonly _modelReferenceCollection: BoundModelReferenceCollection;
constructor(
extHostContext: IExtHostContext,
notebooksAndEditors: MainThreadNotebooksAndEditors,
@INotebookEditorModelResolverService private readonly _notebookEditorModelResolverService: INotebookEditorModelResolverService,
@IUriIdentityService private readonly _uriIdentityService: IUriIdentityService
) {
this._proxy = extHostContext.getProxy(ExtHostContext.ExtHostNotebookDocuments);
this._modelReferenceCollection = new BoundModelReferenceCollection(this._uriIdentityService.extUri);
notebooksAndEditors.onDidAddNotebooks(this._handleNotebooksAdded, this, this._disposables);
notebooksAndEditors.onDidRemoveNotebooks(this._handleNotebooksRemoved, this, this._disposables);
// forward dirty and save events
this._disposables.add(this._notebookEditorModelResolverService.onDidChangeDirty(model => this._proxy.$acceptDirtyStateChanged(model.resource, model.isDirty())));
this._disposables.add(this._notebookEditorModelResolverService.onDidSaveNotebook(e => this._proxy.$acceptModelSaved(e)));
}
dispose(): void {
this._disposables.dispose();
this._modelReferenceCollection.dispose();
dispose(this._documentEventListenersMapping.values());
}
private _handleNotebooksAdded(notebooks: readonly NotebookTextModel[]): void {
for (const textModel of notebooks) {
const disposableStore = new DisposableStore();
disposableStore.add(textModel.onDidChangeContent(event => {
const eventDto: NotebookCellsChangedEventDto = {
versionId: event.versionId,
rawEvents: []
};
for (const e of event.rawEvents) {
switch (e.kind) {
case NotebookCellsChangeType.ModelChange:
eventDto.rawEvents.push({
kind: e.kind,
changes: e.changes.map(diff => [diff[0], diff[1], diff[2].map(cell => NotebookDto.toNotebookCellDto(cell as NotebookCellTextModel))] as [number, number, NotebookCellDto[]])
});
break;
case NotebookCellsChangeType.Move:
eventDto.rawEvents.push({
kind: e.kind,
index: e.index,
length: e.length,
newIdx: e.newIdx,
});
break;
case NotebookCellsChangeType.Output:
eventDto.rawEvents.push({
kind: e.kind,
index: e.index,
outputs: e.outputs.map(NotebookDto.toNotebookOutputDto)
});
break;
case NotebookCellsChangeType.OutputItem:
eventDto.rawEvents.push({
kind: e.kind,
index: e.index,
outputId: e.outputId,
outputItems: e.outputItems.map(NotebookDto.toNotebookOutputItemDto),
append: e.append
});
break;
case NotebookCellsChangeType.ChangeLanguage:
case NotebookCellsChangeType.ChangeCellMetadata:
case NotebookCellsChangeType.ChangeCellInternalMetadata:
eventDto.rawEvents.push(e);
break;
}
}
				// Use the model resolver service to determine whether the model is dirty.
				// Since this is assumed to be the first listener, the model may initially
				// be marked dirty before another event is fired.
this._proxy.$acceptModelChanged(
textModel.uri,
eventDto,
this._notebookEditorModelResolverService.isDirty(textModel.uri)
);
const hasDocumentMetadataChangeEvent = event.rawEvents.find(e => e.kind === NotebookCellsChangeType.ChangeDocumentMetadata);
if (hasDocumentMetadataChangeEvent) {
this._proxy.$acceptDocumentPropertiesChanged(textModel.uri, { metadata: textModel.metadata });
}
}));
this._documentEventListenersMapping.set(textModel.uri, disposableStore);
}
}
|
}
}
async $tryCreateNotebook(options: { viewType: string, content?: NotebookDataDto }): Promise<UriComponents> {
const ref = await this._notebookEditorModelResolverService.resolve({ untitledResource: undefined }, options.viewType);
// untitled notebooks are disposed when they get saved. we should not hold a reference
// to such a disposed notebook and therefore dispose the reference as well
ref.object.notebook.onWillDispose(() => {
ref.dispose();
});
// untitled notebooks are dirty by default
this._proxy.$acceptDirtyStateChanged(ref.object.resource, true);
// apply content changes... slightly HACKY -> this triggers a change event
if (options.content) {
const data = NotebookDto.fromNotebookDataDto(options.content);
ref.object.notebook.reset(data.cells, data.metadata, ref.object.notebook.transientOptions);
}
return ref.object.resource;
}
async $tryOpenNotebook(uriComponents: UriComponents): Promise<URI> {
const uri = URI.revive(uriComponents);
const ref = await this._notebookEditorModelResolverService.resolve(uri, undefined);
this._modelReferenceCollection.add(uri, ref);
return uri;
}
async $trySaveNotebook(uriComponents: UriComponents) {
const uri = URI.revive(uriComponents);
const ref = await this._notebookEditorModelResolverService.resolve(uri);
const saveResult = await ref.object.save();
ref.dispose();
return saveResult;
}
}
|
private _handleNotebooksRemoved(uris: URI[]): void {
for (const uri of uris) {
this._documentEventListenersMapping.get(uri)?.dispose();
this._documentEventListenersMapping.delete(uri);
|
test_assignment.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import uuid
import requests_mock
from canvasapi import Canvas
from canvasapi.assignment import Assignment, AssignmentGroup
from canvasapi.exceptions import CanvasException, RequiredFieldMissing
from canvasapi.progress import Progress
from canvasapi.submission import Submission
from canvasapi.user import UserDisplay
from tests import settings
from tests.util import register_uris, cleanup_file
@requests_mock.Mocker()
class TestAssignment(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({'course': ['get_by_id', 'get_assignment_by_id']}, m)
self.course = self.canvas.get_course(1)
self.assignment = self.course.get_assignment(1)
# delete()
def test_delete_assignments(self, m):
register_uris({'assignment': ['delete_assignment']}, m)
deleted_assignment = self.assignment.delete()
self.assertIsInstance(deleted_assignment, Assignment)
# edit()
def test_edit_assignment(self, m):
register_uris({'assignment': ['edit_assignment']}, m)
name = 'New Name'
edited_assignment = self.assignment.edit(assignment={'name': name})
self.assertIsInstance(edited_assignment, Assignment)
self.assertTrue(hasattr(edited_assignment, 'name'))
self.assertEqual(edited_assignment.name, name)
# get_gradeable_students()
def test_get_gradeable_students(self, m):
register_uris({'course': ['list_gradeable_students']}, m)
students = self.assignment.get_gradeable_students()
student_list = [student for student in students]
self.assertEqual(len(student_list), 2)
self.assertIsInstance(student_list[0], UserDisplay)
# get_submission()
def test_get_submission(self, m):
register_uris({
'submission': ['get_by_id_course'],
'user': ['get_by_id']
}, m)
user_id = 1
submission_by_id = self.assignment.get_submission(user_id)
self.assertIsInstance(submission_by_id, Submission)
self.assertTrue(hasattr(submission_by_id, 'submission_type'))
user = self.canvas.get_user(user_id)
submission_by_obj = self.assignment.get_submission(user)
self.assertIsInstance(submission_by_obj, Submission)
self.assertTrue(hasattr(submission_by_obj, 'submission_type'))
# get_submissions()
def test_get_submissions(self, m):
register_uris({'submission': ['list_submissions']}, m)
submissions = self.assignment.get_submissions()
submission_list_by_id = [submission for submission in submissions]
self.assertEqual(len(submission_list_by_id), 2)
self.assertIsInstance(submission_list_by_id[0], Submission)
# submit()
def test_submit(self, m):
register_uris({'assignment': ['submit']}, m)
sub_type = "online_upload"
sub_dict = {'submission_type': sub_type}
submission = self.assignment.submit(sub_dict)
self.assertIsInstance(submission, Submission)
self.assertTrue(hasattr(submission, 'submission_type'))
self.assertEqual(submission.submission_type, sub_type)
def test_submit_fail(self, m):
with self.assertRaises(RequiredFieldMissing):
self.assignment.submit({})
def test_submit_file(self, m):
register_uris({'assignment': ['submit', 'upload', 'upload_final']}, m)
filename = 'testfile_assignment_{}'.format(uuid.uuid4().hex)
try:
with open(filename, 'w+') as file:
sub_type = "online_upload"
sub_dict = {'submission_type': sub_type}
submission = self.assignment.submit(sub_dict, file)
self.assertIsInstance(submission, Submission)
self.assertTrue(hasattr(submission, 'submission_type'))
self.assertEqual(submission.submission_type, sub_type)
finally:
cleanup_file(filename)
def test_submit_file_wrong_type(self, m):
filename = 'testfile_assignment_{}'.format(uuid.uuid4().hex)
sub_type = "online_text_entry"
sub_dict = {'submission_type': sub_type}
with self.assertRaises(ValueError):
self.assignment.submit(sub_dict, filename)
def test_submit_file_upload_failure(self, m):
register_uris({'assignment': ['submit', 'upload', 'upload_fail']}, m)
filename = 'testfile_assignment_{}'.format(uuid.uuid4().hex)
try:
with open(filename, 'w+') as file:
sub_type = "online_upload"
sub_dict = {'submission_type': sub_type}
with self.assertRaises(CanvasException):
self.assignment.submit(sub_dict, file)
finally:
cleanup_file(filename)
# __str__()
def test__str__(self, m):
string = str(self.assignment)
self.assertIsInstance(string, str)
# submissions_bulk_update()
def test_submissions_bulk_update(self, m):
register_uris({'assignment': ['update_submissions']}, m)
register_uris({'progress': ['course_progress']}, m)
progress = self.assignment.submissions_bulk_update(grade_data={
'1': {
'posted_grade': 97
},
'2': {
'posted_grade': 98
}
})
self.assertIsInstance(progress, Progress)
self.assertTrue(progress.context_type == "Course")
progress = progress.query()
self.assertTrue(progress.context_type == "Course")
# upload_to_submission()
def test_upload_to_submission_self(self, m):
register_uris({'assignment': ['upload', 'upload_final']}, m)
filename = 'testfile_assignment_{}'.format(uuid.uuid4().hex)
try:
with open(filename, 'w+') as file:
response = self.assignment.upload_to_submission(file)
self.assertTrue(response[0])
self.assertIsInstance(response[1], dict)
self.assertIn('url', response[1])
finally:
cleanup_file(filename)
def test_upload_to_submission_user(self, m):
register_uris({'assignment': ['upload_by_id', 'upload_final']}, m)
filename = 'testfile_assignment_{}'.format(uuid.uuid4().hex)
user_id = 1
try:
with open(filename, 'w+') as file:
response = self.assignment.upload_to_submission(file, user_id)
self.assertTrue(response[0])
self.assertIsInstance(response[1], dict)
self.assertIn('url', response[1])
finally:
cleanup_file(filename)
@requests_mock.Mocker()
class TestAssignmentGroup(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({
'course': ['get_by_id'],
'assignment': ['get_assignment_group']
}, m)
self.course = self.canvas.get_course(1)
self.assignment_group = self.course.get_assignment_group(5)
# edit()
def test_edit_assignment_group(self, m):
|
# delete()
def test_delete_assignment_group(self, m):
register_uris({'assignment': ['delete_assignment_group']}, m)
deleted_assignment_group = self.assignment_group.delete()
self.assertIsInstance(deleted_assignment_group, AssignmentGroup)
self.assertTrue(hasattr(deleted_assignment_group, 'name'))
self.assertEqual(deleted_assignment_group.name, 'Assignment Group 5')
# __str__()
def test__str__(self, m):
string = str(self.assignment_group)
self.assertIsInstance(string, str)
|
register_uris({'assignment': ['edit_assignment_group']}, m)
name = 'New Name'
edited_assignment_group = self.assignment_group.edit(
assignment_group={'name': name}
)
self.assertIsInstance(edited_assignment_group, AssignmentGroup)
self.assertTrue(hasattr(edited_assignment_group, 'name'))
self.assertEqual(edited_assignment_group.name, name)
|
spec_processor.go
|
package bundlec
import (
"fmt"
"reflect"
"regexp"
"unicode/utf8"
smith_v1 "github.com/atlassian/smith/pkg/apis/smith/v1"
"github.com/atlassian/smith/pkg/resources"
"github.com/pkg/errors"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)
var (
// ?s allows us to match multiline expressions.
reference = regexp.MustCompile(`(?s)^(!+)\{(.+)}$`)
)
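// Illustrative note (an assumption derived from the regex above, not from the
// original file): a value like "!{myref}" matches with match[1] == "!" and
// match[2] == "myref", while "!!{myref}" also matches with match[1] == "!!";
// the escaping TODO in ProcessString does not yet distinguish the two. Plain
// strings such as "myref" or "{myref}" do not match and pass through unchanged.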
type specProcessor struct {
variables map[smith_v1.ReferenceName]interface{}
}
// noExampleError occurs when we try to process the spec with examples rather
// than resolving references, but at least one of the references doesn't specify an example.
type noExampleError struct {
referenceName smith_v1.ReferenceName
}
func (e *noExampleError) Error() string {
return fmt.Sprintf("no example value provided in reference %q", e.referenceName)
}
func
|
(err error) bool {
switch typedErr := err.(type) {
case utilerrors.Aggregate:
for _, e := range typedErr.Errors() {
if _, ok := errors.Cause(e).(*noExampleError); !ok {
return false
}
}
return true
case *noExampleError:
return true
default:
return false
}
}
func newSpec(resources map[smith_v1.ResourceName]*resourceInfo, references []smith_v1.Reference) (*specProcessor, error) {
variables, err := resolveAllReferences(references, func(reference smith_v1.Reference) (interface{}, error) {
return resolveReference(resources, reference)
})
if err != nil {
return nil, err
}
return &specProcessor{
variables: variables,
}, nil
}
func newExamplesSpec(references []smith_v1.Reference) (*specProcessor, error) {
variables, err := resolveAllReferences(references, func(reference smith_v1.Reference) (interface{}, error) {
if reference.Example == nil {
return nil, errors.WithStack(&noExampleError{referenceName: reference.Name})
}
return reference.Example, nil
})
if err != nil {
return nil, err
}
return &specProcessor{
variables: variables,
}, nil
}
func resolveAllReferences(
references []smith_v1.Reference,
resolveReference func(reference smith_v1.Reference) (interface{}, error),
) (map[smith_v1.ReferenceName]interface{}, error) {
refs := make(map[smith_v1.ReferenceName]interface{}, len(references))
var errs []error
for _, reference := range references {
// Don't 'resolve' nameless references - they're just being
// used to cause dependencies.
if reference.Name == "" {
continue
}
resolvedRef, err := resolveReference(reference)
if err != nil {
errs = append(errs, err)
continue
}
refs[reference.Name] = resolvedRef
}
if len(errs) > 0 {
return nil, utilerrors.NewAggregate(errs)
}
return refs, nil
}
func (sp *specProcessor) ProcessObject(obj map[string]interface{}, path ...string) error {
for key, value := range obj {
v, err := sp.ProcessValue(value, append(path, key)...)
if err != nil {
return err
}
obj[key] = v
}
return nil
}
func (sp *specProcessor) ProcessValue(value interface{}, path ...string) (interface{}, error) {
switch v := value.(type) {
case string:
return sp.ProcessString(v, path...)
case map[string]interface{}:
if err := sp.ProcessObject(v, path...); err != nil {
return nil, err
}
default:
// handle slices and slices of slices and ... inception. err, reflection
rv := reflect.ValueOf(value)
if rv.Kind() != reflect.Slice {
break
}
length := rv.Len()
		// this may change the underlying slice type, and that is on purpose. E.g. it may be a slice of string
		// references, some elements of which need to be turned into structs. That means the resulting
		// slice may have mixed types.
result := make([]interface{}, length)
for i := 0; i < length; i++ {
res, err := sp.ProcessValue(rv.Index(i).Interface(), append(path, fmt.Sprintf("[%d]", i))...)
if err != nil {
return nil, err
}
result[i] = res
}
value = result
}
return value, nil
}
func (sp *specProcessor) ProcessString(value string, path ...string) (interface{}, error) {
match := reference.FindStringSubmatch(value)
if match == nil {
return value, nil
}
// TODO escaping.
reference, allowed := sp.variables[smith_v1.ReferenceName(match[2])]
if !allowed {
return nil, errors.Errorf("reference does not exist in resource references block: %s", match[2])
}
return reference, nil
}
func resolveReference(resInfos map[smith_v1.ResourceName]*resourceInfo, reference smith_v1.Reference) (interface{}, error) {
resInfo := resInfos[reference.Resource]
if resInfo == nil {
return nil, errors.Errorf("internal dependency resolution error - resource referenced by %q not found in Bundle: %s", reference.Name, reference.Resource)
}
var objToTraverse interface{}
switch reference.Modifier {
case "":
objToTraverse = resInfo.actual.Object
case smith_v1.ReferenceModifierBindSecret:
if resInfo.serviceBindingSecret == nil {
return nil, errors.Errorf("%q requested, but %q is not a ServiceBinding", smith_v1.ReferenceModifierBindSecret, reference.Resource)
}
objToTraverse = resInfo.serviceBindingSecret
default:
return nil, errors.Errorf("reference modifier %q not understood for %q", reference.Modifier, reference.Resource)
}
	// Accept a short path attribute in references, like: a.string
	// instead of the overcomplicated JSONPath form: {$.a.string}
jsonPath := fmt.Sprintf("{$.%s}", reference.Path)
fieldValue, err := resources.GetJSONPathValue(objToTraverse, jsonPath, false)
if err != nil {
return nil, errors.Wrapf(err, "failed to process reference %q", reference.Name)
}
if fieldValue == nil {
return nil, errors.Errorf("field not found: %q", reference.Path)
}
if byteFieldValue, ok := fieldValue.([]byte); ok {
// Secrets are in bytes. We wildly cast them to a string and hope for the best
// so we can put them in the JSON in a 'nice' way.
if !utf8.Valid(byteFieldValue) {
return nil, errors.Errorf("cannot expand non-UTF8 byte array field %q", reference.Path)
}
fieldValue = string(byteFieldValue)
}
return fieldValue, nil
}
|
isNoExampleError
|
ddsketch.go
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddsketch // import "go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
import (
"context"
"math"
"sync"
sdk "github.com/DataDog/sketches-go/ddsketch"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
// Config is an alias for the underlying DDSketch config object.
type Config = sdk.Config
// Aggregator aggregates events into a distribution.
type Aggregator struct {
lock sync.Mutex
cfg *Config
kind number.Kind
sketch *sdk.DDSketch
}
var _ export.Aggregator = &Aggregator{}
var _ aggregation.MinMaxSumCount = &Aggregator{}
var _ aggregation.Distribution = &Aggregator{}
// New returns cnt new DDSketch aggregators for the given descriptor.
func New(cnt int, desc *metric.Descriptor, cfg *Config) []Aggregator {
if cfg == nil {
cfg = NewDefaultConfig()
}
aggs := make([]Aggregator, cnt)
for i := range aggs {
aggs[i] = Aggregator{
cfg: cfg,
kind: desc.NumberKind(),
sketch: sdk.NewDDSketch(cfg),
}
}
return aggs
}
// Aggregation returns an interface for reading the state of this aggregator.
func (c *Aggregator) Aggregation() aggregation.Aggregation {
return c
}
// Kind returns aggregation.SketchKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.SketchKind
}
// NewDefaultConfig returns a new, default DDSketch config.
func NewDefaultConfig() *Config {
return sdk.NewDefaultConfig()
}
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (number.Number, error) {
return c.toNumber(c.sketch.Sum()), nil
}
// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (uint64, error) {
return uint64(c.sketch.Count()), nil
}
// Max returns the maximum value in the checkpoint.
func (c *Aggregator) Max() (number.Number, error) {
return c.Quantile(1)
}
// Min returns the minimum value in the checkpoint.
func (c *Aggregator) Min() (number.Number, error) {
return c.Quantile(0)
}
// Quantile returns the estimated quantile of data in the checkpoint.
// It is an error if `q` is less than 0 or greater than 1.
func (c *Aggregator) Quantile(q float64) (number.Number, error) {
if c.sketch.Count() == 0 {
return 0, aggregation.ErrNoData
}
f := c.sketch.Quantile(q)
if math.IsNaN(f) {
return 0, aggregation.ErrInvalidQuantile
}
return c.toNumber(f), nil
}
func (c *Aggregator) toNumber(f float64) number.Number {
if c.kind == number.Float64Kind {
return number.NewFloat64Number(f)
}
return number.NewInt64Number(int64(f))
}
// SynchronizedMove saves the current state into oa and resets the current state to
// a new sketch, taking a lock to prevent concurrent Update() calls.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, _ *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if oa != nil && o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)
}
replace := sdk.NewDDSketch(c.cfg)
c.lock.Lock()
if o != nil {
o.sketch = c.sketch
}
c.sketch = replace
c.lock.Unlock()
return nil
}
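// An illustrative (hypothetical) helper, not part of the upstream file,
// showing the same swap-under-lock checkpoint pattern in isolation: the
// replacement state is allocated outside the critical section, and only
// the pointer swap happens under the lock, so Update never races with the
// reader taking a snapshot.
type checkpointed struct {
	lock  sync.Mutex
	state *sdk.DDSketch
}

func (c *checkpointed) move(cfg *Config) *sdk.DDSketch {
	replace := sdk.NewDDSketch(cfg)
	c.lock.Lock()
	snapshot := c.state
	c.state = replace
	c.lock.Unlock()
	return snapshot
}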
// Update adds the recorded measurement to the current data set.
// Update takes a lock to prevent concurrent Update() and SynchronizedMove()
// calls.
func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {
c.lock.Lock()
defer c.lock.Unlock()
c.sketch.Add(number.CoerceToFloat64(desc.NumberKind()))
return nil
}
// Merge combines two sketches into one.
func (c *Aggregator) Merge(oa export.Aggregator, d *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)
}
c.sketch.Merge(o.sketch)
return nil
}
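// Example usage (illustrative, not part of the upstream file), exercising
// only the sketches-go calls this aggregator already relies on:
// NewDefaultConfig, NewDDSketch, Add, Quantile, Count and Sum.
func exampleSketchUsage() {
	sketch := sdk.NewDDSketch(sdk.NewDefaultConfig())
	for _, v := range []float64{1, 2, 3, 4, 5} {
		sketch.Add(v)
	}
	// Quantile(0.5) estimates the median; Count and Sum are exact.
	_ = sketch.Quantile(0.5)
	_ = sketch.Count()
	_ = sketch.Sum()
}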
|
splash.rs
|
use std::cmp;
use unicode_width::UnicodeWidthStr;
use zi::{
backend,
components::border::{Border, BorderProperties},
layout, App, BindingMatch, BindingTransition, Canvas, Colour, Component, ComponentLink, Key,
Layout, Rect, Result, ShouldRender, Size, Style,
};
#[derive(Clone, Debug, PartialEq, Eq)]
struct Theme {
logo: Style,
tagline: Style,
credits: Style,
}
impl Default for Theme {
fn default() -> Self {
const DARK0_SOFT: Colour = Colour::rgb(50, 48, 47);
const LIGHT2: Colour = Colour::rgb(213, 196, 161);
const GRAY_245: Colour = Colour::rgb(146, 131, 116);
const BRIGHT_BLUE: Colour = Colour::rgb(131, 165, 152);
Self {
logo: Style::normal(DARK0_SOFT, LIGHT2),
tagline: Style::normal(DARK0_SOFT, BRIGHT_BLUE),
credits: Style::normal(DARK0_SOFT, GRAY_245),
}
}
}
#[derive(Clone, Debug, Default, PartialEq, Eq)]
struct Properties {
theme: Theme,
logo: String,
tagline: String,
credits: String,
offset: usize,
}
fn text_block_size(text: &str) -> Size {
let width = text.lines().map(UnicodeWidthStr::width).max().unwrap_or(0);
let height = text.lines().count();
Size::new(width, height)
}
#[derive(Debug)]
struct Splash {
properties: Properties,
frame: Rect,
}
impl Component for Splash {
type Message = usize;
type Properties = Properties;
fn create(properties: Self::Properties, frame: Rect, _link: ComponentLink<Self>) -> Self {
Self { properties, frame }
}
fn change(&mut self, properties: Self::Properties) -> ShouldRender {
if self.properties != properties {
self.properties = properties;
ShouldRender::Yes
} else {
ShouldRender::No
}
}
fn resize(&mut self, frame: Rect) -> ShouldRender {
self.frame = frame;
ShouldRender::Yes
}
#[inline]
fn view(&self) -> Layout {
let logo_size = text_block_size(&self.properties.logo);
let tagline_size = text_block_size(&self.properties.tagline);
let credits_size = text_block_size(&self.properties.credits);
let theme = self.properties.theme.clone();
let mut canvas = Canvas::new(self.frame.size);
canvas.clear(theme.logo);
// Draw logo
let middle_x = (self.frame.size.width / 2).saturating_sub(logo_size.width / 2);
let mut middle_y = cmp::min(8, self.frame.size.height.saturating_sub(logo_size.height))
+ self.properties.offset;
for line in self.properties.logo.lines() {
canvas.draw_str(middle_x, middle_y, theme.logo, line);
middle_y += 1;
}
// Draw tagline
middle_y += 2;
let middle_x = (self.frame.size.width / 2).saturating_sub(tagline_size.width / 2);
for line in self.properties.tagline.lines() {
canvas.draw_str(middle_x, middle_y, theme.tagline, line);
middle_y += 1;
}
// Draw credits
middle_y += 1;
let middle_x = (self.frame.size.width / 2).saturating_sub(credits_size.width / 2);
for line in self.properties.credits.lines() {
canvas.draw_str(middle_x, middle_y, theme.credits, line);
middle_y += 1;
}
canvas.into()
}
}
#[derive(Debug)]
struct SplashGrid {
theme: Theme,
link: ComponentLink<Self>,
}
impl Component for SplashGrid {
type Message = usize;
type Properties = ();
fn create(_properties: Self::Properties, _frame: Rect, link: ComponentLink<Self>) -> Self {
Self {
theme: Default::default(),
link,
}
}
fn view(&self) -> Layout {
layout::component::<Border>(
BorderProperties::new(layout::column([layout::auto(layout::component::<Splash>(
Properties {
theme: self.theme.clone(),
logo: SPLASH_LOGO.into(),
tagline: SPLASH_TAGLINE.into(),
credits: SPLASH_CREDITS.into(),
offset: 0,
},
))]))
.style(self.theme.credits),
)
}
fn has_focus(&self) -> bool {
true
}
fn input_binding(&self, pressed: &[Key]) -> BindingMatch<Self::Message> {
let mut transition = BindingTransition::Clear;
let message = match pressed {
[Key::Ctrl('x'), Key::Ctrl('c')] => {
self.link.exit();
None
}
[Key::Ctrl('x')] => {
transition = BindingTransition::Continue;
None
}
_ => None,
};
BindingMatch {
transition,
message,
}
}
}
const SPLASH_LOGO: &str = r#"
▄████████ ▄███████▄ ▄█ ▄████████ ▄████████ ▄█ █▄
███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███
███ █▀ ███ ███ ███ ███ ███ ███ █▀ ███ ███
███ ███ ███ ███ ███ ███ ███ ▄███▄▄▄▄███▄▄
▀███████████ ▀█████████▀ ███ ▀███████████ ▀███████████ ▀▀███▀▀▀▀███▀
███ ███ ███ ███ ███ ███ ███ ███
▄█ ███ ███ ███▌ ▄ ███ ███ ▄█ ███ ███ ███
▄████████▀ ▄████▀ █████▄▄██ ███ █▀ ▄████████▀ ███ █▀
"#;
const SPLASH_TAGLINE: &str = "a splash screen for the terminal";
const SPLASH_CREDITS: &str = "C-x C-c to quit";
fn main() -> Result<()> {
let mut app = App::new(layout::component::<SplashGrid>(()));
app.run_event_loop(backend::crossterm::incremental()?)
}
|